diff --git a/src/aosm/HISTORY.rst b/src/aosm/HISTORY.rst
index c02cf98ffa9..93d5b19675a 100644
--- a/src/aosm/HISTORY.rst
+++ b/src/aosm/HISTORY.rst
@@ -3,12 +3,16 @@
 Release History
 ===============
 
+1.0.0b3
+++++++++
+* Move azure-storage-blob dependency to vendored_sdks (on advice from the Azure CLI team, to avoid 'azure' namespace issues)
+
+1.0.0b2
+++++++++
+* Fixed: Use default_factory when a dataclass default is hashable (Python 3.11 compatibility)
+
 1.0.0b1
 ++++++++
 * Initial release - beta quality
 * `az aosm nfd|nsd generate-config` to generate an example config file to fill in for an NFD or NSD
 * `az aosm nfd|nsd build|publish|delete` to prepare files for, publish or delete an NFD or NSD
-
-1.0.0b2
-++++++++
-* Fixed: Use default_factory when a dataclass default is hashable (Python 3.11 compatibility)
\ No newline at end of file
diff --git a/src/aosm/azext_aosm/delete/delete.py b/src/aosm/azext_aosm/delete/delete.py
index 9d990e647ba..561ce51e04e 100644
--- a/src/aosm/azext_aosm/delete/delete.py
+++ b/src/aosm/azext_aosm/delete/delete.py
@@ -3,10 +3,11 @@
 # Licensed under the MIT License. See License.txt in the project root for license information.
 # --------------------------------------------------------------------------------------------
 """Contains class for deploying generated definitions using the Python SDK."""
+from time import sleep
 from typing import Optional
 
-from knack.log import get_logger
 from azure.cli.core.commands import LongRunningOperation
+from azure.core.exceptions import ResourceExistsError
 from azext_aosm._configuration import (
     Configuration,
     NFConfiguration,
@@ -15,6 +16,7 @@
 )
 from azext_aosm.util.management_clients import ApiClients
 from azext_aosm.util.utils import input_ack
+from knack.log import get_logger
 
 logger = get_logger(__name__)
 
@@ -296,16 +298,29 @@ def delete_publisher(self) -> None:
         message = f"Delete Publisher {self.config.publisher_name}"
         logger.debug(message)
         print(message)
-        try:
-            poller = self.api_clients.aosm_client.publishers.begin_delete(
-                resource_group_name=self.config.publisher_resource_group_name,
-                publisher_name=self.config.publisher_name,
-            )
-            LongRunningOperation(self.cli_ctx, "Deleting Publisher...")(poller)
-            logger.info("Deleted Publisher")
-        except Exception:
-            logger.error("Failed to delete publisher")
-            raise
+        # Occasionally nested resources that have just been deleted (e.g. artifact store) will
+        # still appear to exist, raising ResourceExistsError. We handle this by retrying up to
+        # 6 times, with a 30-second wait between attempts.
+        for attempt in range(6):
+            try:
+                poller = self.api_clients.aosm_client.publishers.begin_delete(
+                    resource_group_name=self.config.publisher_resource_group_name,
+                    publisher_name=self.config.publisher_name,
+                )
+                LongRunningOperation(self.cli_ctx, "Deleting Publisher...")(poller)
+                logger.info("Deleted Publisher")
+                break
+            except ResourceExistsError:
+                if attempt == 5:
+                    logger.error("Failed to delete publisher")
+                    raise
+                logger.debug(
+                    "ResourceExistsError: this may be because a nested resource has not finished deleting. Waiting and retrying."
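Reviewer note: the retry loop added above is a standard transient-conflict pattern: retry only on the expected error class, wait a fixed interval, and re-raise once the attempt budget is exhausted. A minimal, self-contained sketch of the same pattern as a standalone helper, under stated assumptions (the name retry_on_conflict and its defaults are illustrative, not code from this change):

from time import sleep
from typing import Callable, Type, TypeVar

T = TypeVar("T")


def retry_on_conflict(
    operation: Callable[[], T],
    transient_error: Type[BaseException],
    attempts: int = 6,        # assumption: mirrors range(6) in the hunk above
    wait_seconds: float = 30,  # assumption: mirrors sleep(30) in the hunk above
) -> T:
    """Run operation(), retrying only when transient_error is raised."""
    for attempt in range(attempts):
        try:
            return operation()
        except transient_error:
            # Out of retries: surface the error to the caller.
            if attempt == attempts - 1:
                raise
            # Give nested resources time to finish deleting before retrying.
            sleep(wait_seconds)
    raise AssertionError("unreachable: loop always returns or raises")

In this sketch, any other exception propagates immediately; the change above additionally logs such failures before re-raising.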
+                )
+                sleep(30)
+            except Exception:
+                logger.error("Failed to delete publisher")
+                raise
 
     def delete_config_group_schema(self) -> None:
         """Delete the Configuration Group Schema."""
diff --git a/src/aosm/azext_aosm/deploy/artifact.py b/src/aosm/azext_aosm/deploy/artifact.py
index 26c719c432b..ce8f2daa58f 100644
--- a/src/aosm/azext_aosm/deploy/artifact.py
+++ b/src/aosm/azext_aosm/deploy/artifact.py
@@ -12,7 +12,6 @@
 from dataclasses import dataclass
 from typing import Any, Optional, Union
 
-from azure.storage.blob import BlobClient, BlobType
 from knack.log import get_logger
 from knack.util import CLIError
 from oras.client import OrasClient
@@ -23,6 +22,8 @@
     HelmPackageConfig,
     VhdArtifactConfig,
 )
+from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
+    BlobClient, BlobType)
 
 logger = get_logger(__name__)
 
diff --git a/src/aosm/azext_aosm/deploy/artifact_manifest.py b/src/aosm/azext_aosm/deploy/artifact_manifest.py
index 3ca14ca3782..ba0dc51a70c 100644
--- a/src/aosm/azext_aosm/deploy/artifact_manifest.py
+++ b/src/aosm/azext_aosm/deploy/artifact_manifest.py
@@ -7,7 +7,6 @@
 from typing import Any, List, Union
 
 from azure.cli.core.azclierror import AzCLIError
-from azure.storage.blob import BlobClient
 from knack.log import get_logger
 from oras.client import OrasClient
 
@@ -20,6 +19,7 @@
     CredentialType,
     ManifestArtifactFormat,
 )
+from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import BlobClient
 
 logger = get_logger(__name__)
 
diff --git a/src/aosm/azext_aosm/generate_nfd/cnf_nfd_generator.py b/src/aosm/azext_aosm/generate_nfd/cnf_nfd_generator.py
index 2c55558c288..9543ccff776 100644
--- a/src/aosm/azext_aosm/generate_nfd/cnf_nfd_generator.py
+++ b/src/aosm/azext_aosm/generate_nfd/cnf_nfd_generator.py
@@ -712,7 +712,7 @@ def search_schema(
             # (This currently only supports a single type, not a list of types.
             # If a list is provided, we default to string.)
array_item_schema = node.get("items", {}) - if type(array_item_schema) is dict: + if isinstance(array_item_schema, dict): param_type = array_item_schema.get("type", None) else: logger.debug("Array item schema is not a dict (probably a list)") diff --git a/src/aosm/azext_aosm/tests/latest/recordings/test_vnf_nsd_publish_and_delete.yaml b/src/aosm/azext_aosm/tests/latest/recordings/test_vnf_nsd_publish_and_delete.yaml deleted file mode 100644 index 97d2f8024d6..00000000000 --- a/src/aosm/azext_aosm/tests/latest/recordings/test_vnf_nsd_publish_and_delete.yaml +++ /dev/null @@ -1,8781 +0,0 @@ -interactions: -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-2023-09-01"}' - headers: - cache-control: - - no-cache - content-length: - - '290' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:30 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-Publisher"}' - headers: - cache-control: - - no-cache - content-length: - - '288' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:30 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 
azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: HEAD - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001?api-version=2022-09-01 - response: - body: - string: '' - headers: - cache-control: - - no-cache - content-length: - - '0' - date: - - Wed, 18 Oct 2023 13:38:30 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 204 - message: No Content -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001", - "name": "cli_test_vnf_nsd_000001", "type": "Microsoft.Resources/resourceGroups", - "location": "uaenorth", "tags": {"product": "azurecli", "cause": "automation", - "test": "test_vnf_nsd_publish_and_delete", "date": "2023-10-18T13:38:28Z", - "module": "aosm", "autoDelete": "true", "expiresOn": "2023-11-17T13:38:28.5214058Z"}, - "properties": {"provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '471' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:30 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. 
For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '265' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:30 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"location": "uaenorth", "properties": {"scope": "Private"}, "identity": - {"type": "SystemAssigned"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '100' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "name": "automated-tests-ubuntuPublisher", "type": "microsoft.hybridnetwork/publishers", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:38:32.2024151Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T13:38:32.2024151Z"}, "identity": {"principalId": "a5b8c784-46ee-4a43-b5ec-4d3a1db603af", - "tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47", "type": "SystemAssigned"}, - "properties": {"scope": "Private", "provisioningState": "Accepted"}}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '759' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:41 GMT - etag: - - '"0700b034-0000-3200-0000-652fdfe10000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": 
"/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Accepted", "startTime": "2023-10-18T13:38:40.6052037Z"}' - headers: - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:38:41 GMT - etag: - - '"0000ff14-0000-3200-0000-652fdfe00000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Accepted", "startTime": "2023-10-18T13:38:40.6052037Z"}' - headers: - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:39:10 GMT - etag: - - '"0000ff14-0000-3200-0000-652fdfe00000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": 
"bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Accepted", "startTime": "2023-10-18T13:38:40.6052037Z"}' - headers: - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:39:41 GMT - etag: - - '"0000ff14-0000-3200-0000-652fdfe00000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "bf3e8c2d-86c9-48fd-a7a9-43002fa705fc*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Succeeded", "startTime": "2023-10-18T13:38:40.6052037Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '569' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:40:10 GMT - etag: - - '"00000915-0000-3200-0000-652fe02c0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "name": "automated-tests-ubuntuPublisher", "type": "microsoft.hybridnetwork/publishers", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:38:32.2024151Z", 
"lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T13:38:32.2024151Z"}, "identity": {"principalId": "a5b8c784-46ee-4a43-b5ec-4d3a1db603af", - "tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47", "type": "SystemAssigned"}, - "properties": {"scope": "Private", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '760' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:40:10 GMT - etag: - - '"0700c334-0000-3200-0000-652fdffa0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '291' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:40:11 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"location": "uaenorth", "properties": {"storeType": "AzureContainerRegistry"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '79' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "name": "ubuntu-acr", "type": "microsoft.hybridnetwork/publishers/artifactstores", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:40:13.0005011Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - 
"2023-10-18T13:40:13.0005011Z"}, "properties": {"storeType": "AzureContainerRegistry", - "managedResourceGroupConfiguration": {"location": "uaenorth", "name": "ubuntu-acr-HostedResources-50EFD041"}, - "provisioningState": "Accepted"}}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '761' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:40:17 GMT - etag: - - '"0000cdec-0000-3200-0000-652fe0400000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1198' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "name": "7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "status": "Accepted", "startTime": "2023-10-18T13:40:16.4028064Z"}' - headers: - cache-control: - - no-cache - content-length: - - '574' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 13:40:17 GMT - etag: - - '"00000a15-0000-3200-0000-652fe0400000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "name": 
"7177d2d4-da9a-4ad8-b3d4-6f5cb8a5d7d1*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "status": "Succeeded", "startTime": "2023-10-18T13:40:16.4028064Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '595' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:03:48 GMT - etag: - - '"00000f15-0000-3200-0000-652fe0c80000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "name": "ubuntu-acr", "type": "microsoft.hybridnetwork/publishers/artifactstores", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:40:13.0005011Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T13:40:13.0005011Z"}, "properties": {"storeType": "AzureContainerRegistry", - "replicationStrategy": "SingleReplication", "managedResourceGroupConfiguration": - {"name": "ubuntu-acr-HostedResources-50EFD041", "location": "uaenorth"}, "provisioningState": - "Succeeded", "storageResourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/ubuntu-acr-HostedResources-50EFD041/providers/Microsoft.ContainerRegistry/registries/AutomatedTestsUbuntupublisherUbuntuAcrc4f3741041"}}' - headers: - cache-control: - - no-cache - content-length: - - '1031' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:03:49 GMT - etag: - - '"000019ed-0000-3200-0000-652fe0c00000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '298' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:03:49 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"location": "uaenorth", "properties": {"storeType": "AzureStorageAccount"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '76' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "name": "ubuntu-blob-store", "type": "microsoft.hybridnetwork/publishers/artifactstores", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:03:51.0828185Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:03:51.0828185Z"}, "properties": {"storeType": "AzureStorageAccount", - "managedResourceGroupConfiguration": {"location": "uaenorth", "name": "ubuntu-blob-store-HostedResources-1F1BBDBE"}, - "provisioningState": "Accepted"}}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '779' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:03:56 GMT - etag: - - '"000086ef-0000-3200-0000-652fe5cd0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - 
ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Accepted", "startTime": "2023-10-18T14:03:56.2828932Z"}' - headers: - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:03:57 GMT - etag: - - '"00003015-0000-3200-0000-652fe5cc0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Accepted", "startTime": "2023-10-18T14:03:56.2828932Z"}' - headers: - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:04:26 GMT - etag: - - '"00003015-0000-3200-0000-652fe5cc0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 
(Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Accepted", "startTime": "2023-10-18T14:03:56.2828932Z"}' - headers: - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:04:56 GMT - etag: - - '"00003015-0000-3200-0000-652fe5cc0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Accepted", "startTime": "2023-10-18T14:03:56.2828932Z"}' - headers: - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:05:26 GMT - etag: - - '"00003015-0000-3200-0000-652fe5cc0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Accepted", "startTime": "2023-10-18T14:03:56.2828932Z"}' - headers: - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:05:56 GMT - etag: - - '"00003015-0000-3200-0000-652fe5cc0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "b961b1b4-7bc7-41cc-813a-449b5145cd14*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Succeeded", "startTime": "2023-10-18T14:03:56.2828932Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '602' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:06:25 GMT - etag: - - '"00003315-0000-3200-0000-652fe65a0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "name": "ubuntu-blob-store", "type": "microsoft.hybridnetwork/publishers/artifactstores", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:03:51.0828185Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:03:51.0828185Z"}, "properties": {"storeType": "AzureStorageAccount", - "replicationStrategy": "SingleReplication", "managedResourceGroupConfiguration": - {"name": "ubuntu-blob-store-HostedResources-1F1BBDBE", "location": "uaenorth"}, - "provisioningState": "Succeeded", "storageResourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/ubuntu-blob-store-HostedResources-1F1BBDBE/providers/Microsoft.Storage/storageAccounts/1f1bbdbeubuntublobstore1"}}' - headers: - cache-control: - - no-cache - content-length: - - '1027' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:06:26 GMT - etag: - - '"0000caef-0000-3200-0000-652fe6460000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. 
For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '312' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:06:26 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"location": "uaenorth"}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '24' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "name": "ubuntu-vm-nfdg", "type": "microsoft.hybridnetwork/publishers/networkfunctiondefinitiongroups", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:06:27.8813669Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:06:27.8813669Z"}, "properties": {"provisioningState": "Accepted"}}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '654' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:06:34 GMT - etag: - - '"01006b8d-0000-3200-0000-652fe66b0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1198' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "name": 
"ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "status": "Accepted", "startTime": "2023-10-18T14:06:34.291379Z"}' - headers: - cache-control: - - no-cache - content-length: - - '594' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:06:34 GMT - etag: - - '"00003415-0000-3200-0000-652fe66a0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "name": "ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "status": "Accepted", "startTime": "2023-10-18T14:06:34.291379Z"}' - headers: - cache-control: - - no-cache - content-length: - - '594' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:07:04 GMT - etag: - - '"00003415-0000-3200-0000-652fe66a0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "name": "ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "resourceId": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "status": "Accepted", "startTime": "2023-10-18T14:06:34.291379Z"}' - headers: - cache-control: - - no-cache - content-length: - - '594' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:07:34 GMT - etag: - - '"00003415-0000-3200-0000-652fe66a0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "name": "ea40cf1b-1f7e-4ff5-ba63-dc5351eac8a8*3D7B424C608F0FE023A3EF90EDF0DE88CBB6EC0A899EC2E141CCB27619CA7BDF", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "status": "Succeeded", "startTime": "2023-10-18T14:06:34.291379Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '615' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:04 GMT - etag: - - '"00003715-0000-3200-0000-652fe6b70000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg", - "name": "ubuntu-vm-nfdg", "type": "microsoft.hybridnetwork/publishers/networkfunctiondefinitiongroups", - "location": "uaenorth", "systemData": {"createdBy": 
"achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:06:27.8813669Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:06:27.8813669Z"}, "properties": {"description": null, "provisioningState": - "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '676' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:05 GMT - etag: - - '"01007d8d-0000-3200-0000-652fe6810000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '338' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:05 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. 
For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '344' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:05 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "17482952888473992305"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "saArtifactStoreName": {"type": "string", "metadata": {"description": - "Name of an existing Storage Account-backed Artifact Store, deployed under the - publisher."}}, "acrManifestName": {"type": "string", "metadata": {"description": - "Name of the manifest to deploy for the ACR-backed Artifact Store"}}, "saManifestName": - {"type": "string", "metadata": {"description": "Name of the manifest to deploy - for the Storage Account-backed Artifact Store"}}, "nfName": {"type": "string", - "metadata": {"description": "Name of Network Function. Used predominantly as - a prefix for other variable names"}}, "vhdVersion": {"type": "string", "metadata": - {"description": "The version that you want to name the NFM VHD artifact, in - format A-B-C. e.g. 
6-13-0"}}, "armTemplateVersion": {"type": "string", "metadata": - {"description": "The name under which to store the ARM template"}}}, "resources": - [{"type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''saArtifactStoreName''), parameters(''saManifestName''))]", "location": - "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[format(''{0}-vhd'', parameters(''nfName''))]", "artifactType": "VhdImageFile", - "artifactVersion": "[parameters(''vhdVersion'')]"}]}}, {"type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''acrArtifactStoreName''), parameters(''acrManifestName''))]", "location": - "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[format(''{0}-arm-template'', parameters(''nfName''))]", "artifactType": "ArmTemplate", - "artifactVersion": "[parameters(''armTemplateVersion'')]"}]}}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "saArtifactStoreName": {"value": - "ubuntu-blob-store"}, "acrManifestName": {"value": "ubuntu-vm-acr-manifest-1-0-0"}, - "saManifestName": {"value": "ubuntu-vm-sa-manifest-1-0-0"}, "nfName": {"value": - "ubuntu-vm"}, "vhdVersion": {"value": "1-0-0"}, "armTemplateVersion": {"value": - "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '2911' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/validate?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638089", - "name": "AOSM_CLI_deployment_1697638089", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "17482952888473992305", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "acrManifestName": {"type": "String", "value": - "ubuntu-vm-acr-manifest-1-0-0"}, "saManifestName": {"type": "String", "value": - "ubuntu-vm-sa-manifest-1-0-0"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "vhdVersion": {"type": "String", "value": "1-0-0"}, "armTemplateVersion": - {"type": "String", "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": - "Succeeded", "timestamp": "0001-01-01T00:00:00Z", "duration": "PT0S", "correlationId": - "bd5cc5c0-be32-4f09-bbaf-ea812660522e", "providers": [{"namespace": "Microsoft.Hybridnetwork", - "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - 
"locations": ["uaenorth"]}]}], "dependencies": [], "validatedResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0"}, - {"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1819' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:13 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 200 - message: OK -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "17482952888473992305"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "saArtifactStoreName": {"type": "string", "metadata": {"description": - "Name of an existing Storage Account-backed Artifact Store, deployed under the - publisher."}}, "acrManifestName": {"type": "string", "metadata": {"description": - "Name of the manifest to deploy for the ACR-backed Artifact Store"}}, "saManifestName": - {"type": "string", "metadata": {"description": "Name of the manifest to deploy - for the Storage Account-backed Artifact Store"}}, "nfName": {"type": "string", - "metadata": {"description": "Name of Network Function. Used predominantly as - a prefix for other variable names"}}, "vhdVersion": {"type": "string", "metadata": - {"description": "The version that you want to name the NFM VHD artifact, in - format A-B-C. e.g. 
6-13-0"}}, "armTemplateVersion": {"type": "string", "metadata": - {"description": "The name under which to store the ARM template"}}}, "resources": - [{"type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''saArtifactStoreName''), parameters(''saManifestName''))]", "location": - "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[format(''{0}-vhd'', parameters(''nfName''))]", "artifactType": "VhdImageFile", - "artifactVersion": "[parameters(''vhdVersion'')]"}]}}, {"type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''acrArtifactStoreName''), parameters(''acrManifestName''))]", "location": - "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[format(''{0}-arm-template'', parameters(''nfName''))]", "artifactType": "ArmTemplate", - "artifactVersion": "[parameters(''armTemplateVersion'')]"}]}}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "saArtifactStoreName": {"value": - "ubuntu-blob-store"}, "acrManifestName": {"value": "ubuntu-vm-acr-manifest-1-0-0"}, - "saManifestName": {"value": "ubuntu-vm-sa-manifest-1-0-0"}, "nfName": {"value": - "ubuntu-vm"}, "vhdVersion": {"value": "1-0-0"}, "armTemplateVersion": {"value": - "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '2911' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638089", - "name": "AOSM_CLI_deployment_1697638089", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "17482952888473992305", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "acrManifestName": {"type": "String", "value": - "ubuntu-vm-acr-manifest-1-0-0"}, "saManifestName": {"type": "String", "value": - "ubuntu-vm-sa-manifest-1-0-0"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "vhdVersion": {"type": "String", "value": "1-0-0"}, "armTemplateVersion": - {"type": "String", "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": - "Accepted", "timestamp": "2023-10-18T14:08:15.8809095Z", "duration": "PT0.0007626S", - "correlationId": "0139416a-834d-4cd9-b2e4-e71df7112069", "providers": [{"namespace": - "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - 
"locations": ["uaenorth"]}]}], "dependencies": []}}' - headers: - azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638089/operationStatuses/08585039687912940387?api-version=2022-09-01 - cache-control: - - no-cache - content-length: - - '1300' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:16 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687912940387?api-version=2022-09-01 - response: - body: - string: '{"status": "Accepted"}' - headers: - cache-control: - - no-cache - content-length: - - '22' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:17 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687912940387?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:08:46 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687912940387?api-version=2022-09-01 - response: - body: - string: '{"status": "Succeeded"}' - headers: - cache-control: - - 
no-cache - content-length: - - '23' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:16 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638089", - "name": "AOSM_CLI_deployment_1697638089", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "17482952888473992305", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "acrManifestName": {"type": "String", "value": - "ubuntu-vm-acr-manifest-1-0-0"}, "saManifestName": {"type": "String", "value": - "ubuntu-vm-sa-manifest-1-0-0"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "vhdVersion": {"type": "String", "value": "1-0-0"}, "armTemplateVersion": - {"type": "String", "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": - "Succeeded", "timestamp": "2023-10-18T14:08:53.1761725Z", "duration": "PT37.2960256S", - "correlationId": "0139416a-834d-4cd9-b2e4-e71df7112069", "providers": [{"namespace": - "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - "locations": ["uaenorth"]}]}], "dependencies": [], "outputResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0"}, - {"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1833' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:16 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 
(Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0", - "name": "ubuntu-vm-sa-manifest-1-0-0", "type": "microsoft.hybridnetwork/publishers/artifactstores/artifactmanifests", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:08:21.1556817Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:08:21.1556817Z"}, "properties": {"artifacts": [{"artifactName": - "ubuntu-vm-vhd", "artifactType": "VhdImageFile", "artifactVersion": "1-0-0"}], - "artifactManifestState": "Uploading", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '849' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:17 GMT - etag: - - '"0400dd9a-0000-3200-0000-652fe6ec0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store/artifactManifests/ubuntu-vm-sa-manifest-1-0-0/listCredential?api-version=2023-09-01 - response: - body: - string: '{"storageAccountId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/ubuntu-blob-store-HostedResources-1F1BBDBE/providers/Microsoft.Storage/storageAccounts/1f1bbdbeubuntublobstore1", - "containerCredentials": [{"containerName": "ubuntuvmvhd-1-0-0", "containerSasUri": - "https://xxxxxxxxxxxxxxx.blob.core.windows.net/ubuntuvmvhd-1-0-0?sv=2021-08-06&si=StorageAccountAccessPolicy&sr=xxxxxxxxxxxxxxxxxxxx"}], - "expiry": "2023-10-19T14:09:21.0578536+00:00", "credentialType": "AzureStorageAccountToken"}' - headers: - cache-control: - - no-cache - content-length: - - '515' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:20 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - 
x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0", - "name": "ubuntu-vm-acr-manifest-1-0-0", "type": "microsoft.hybridnetwork/publishers/artifactstores/artifactmanifests", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:08:21.1244353Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:08:21.1244353Z"}, "properties": {"artifacts": [{"artifactName": - "ubuntu-vm-arm-template", "artifactType": "ArmTemplate", "artifactVersion": - "1.0.0"}], "artifactManifestState": "Uploading", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '852' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:21 GMT - etag: - - '"0400de9a-0000-3200-0000-652fe6ee0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-acr-manifest-1-0-0/listCredential?api-version=2023-09-01 - response: - body: - string: '{"username": "ubuntu-vm-acr-manifest-1-0-0", "acrToken": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "acrServerUrl": "https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io", - "repositories": ["ubuntu-vm-arm-template"], "expiry": "2023-10-19T14:09:24.2646249+00:00", - "credentialType": "AzureContainerRegistryScopedToken"}' - headers: - cache-control: - - no-cache - content-length: - - '345' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:25 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - 
transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1198' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/xml - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - User-Agent: - - azsdk-python-storage-blob/12.16.0 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - x-ms-blob-content-length: - - '512' - x-ms-blob-type: - - PageBlob - x-ms-date: - - Wed, 18 Oct 2023 14:09:26 GMT - x-ms-version: - - '2022-11-02' - method: PUT - uri: https://xxxxxxxxxxxxxxx.blob.core.windows.net/ubuntuvmvhd-1-0-0/ubuntuvm-1-0-0.vhd?sv=2021-08-06&si=StorageAccountAccessPolicy&sr=xxxxxxxxxxxxxxxxxxxx - response: - body: - string: '' - headers: - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:26 GMT - etag: - - '"0x8DBCFE3D6A6B00E"' - last-modified: - - Wed, 18 Oct 2023 14:09:26 GMT - server: - - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 - x-ms-request-server-encrypted: - - 'true' - x-ms-version: - - '2022-11-02' - status: - code: 201 - message: Created -- request: - body: "61 38 6b 92 16 4b cc ac 6f d4 02 5c 6f 62 79 b9 \n8e 62 ae 07 02 1c dc - 73 5b 7a 51 e7 56 4e 4a b0 \n54 4a 93 2e 6b dd 3c b5 8b 60 fa 80 b1 80 1b 89 - \n1e 4d 7d 86 8e 25 76 58 24 8d 21 87 83 06 88 d6 \na4 fd 94 9c 66 b6 db ee - 92 46 f0 25 fc 84 bb f5 \n3f d9 49 28 ea 54 6a 2a 33 fa e0 47 eb 22 af 91 \nd4 - 34 a6 d9 fe 58 cb 54 03 35 d6 45 40 96 4e f3 \n31 ea 78 20 45 e9 f2 3a de cb - 38 53 c0 9c b2 b7 \n12 9e 57 d9 f6 1b cb 20 23 8c 86 d3 40 da 84 c3 \n22 5b - 48 61 63 e2 5f 5f 43 6d 8f 41 fc ce c1 87 \n33 e1 e2 61 63 e2 5f 5" - headers: - Accept: - - application/xml - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '512' - Content-Type: - - application/octet-stream - If-Match: - - '"0x8DBCFE3D6A6B00E"' - User-Agent: - - azsdk-python-storage-blob/12.16.0 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - x-ms-date: - - Wed, 18 Oct 2023 14:09:27 GMT - x-ms-page-write: - - update - x-ms-range: - - bytes=0-511 - x-ms-version: - - '2022-11-02' - method: PUT - uri: https://xxxxxxxxxxxxxxx.blob.core.windows.net/ubuntuvmvhd-1-0-0/ubuntuvm-1-0-0.vhd?comp=page&sv=2021-08-06&si=StorageAccountAccessPolicy&sr=xxxxxxxxxxxxxxxxxxxx - response: - body: - string: '' - headers: - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:26 GMT - etag: - - '"0x8DBCFE3D6BE5302"' - last-modified: - - Wed, 18 Oct 2023 14:09:27 GMT - server: - - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 - x-ms-blob-sequence-number: - - '0' - x-ms-content-crc64: - - iWvWqElPrJg= - x-ms-request-server-encrypted: - - 'true' - x-ms-version: - - '2022-11-02' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/ - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": 
"pull"}, {"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '302' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:27 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-arm-template:pull,push" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-arm-template%3Apull%2Cpush - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:27 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.316667' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/ - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:27 GMT - docker-distribution-api-version: - - registry/2.0 - docker-upload-uuid: - - d7f4991b-9178-478e-91e7-b1cc43e2a9cb - location: - - /v2/ubuntu-vm-arm-template/blobs/uploads/d7f4991b-9178-478e-91e7-b1cc43e2a9cb?_nouploadcache=false&_state=d_6r7IkzYO_5Y42pShNHfyVVvDaoi77zDk-uG5xJ1-R7Ik5hbWUiOiJ1YnVudHUtdm0tYXJtLXRlbXBsYXRlIiwiVVVJRCI6ImQ3ZjQ5OTFiLTkxNzgtNDc4ZS05MWU3LWIxY2M0M2UyYTljYiIsIk9mZnNldCI6MCwiU3RhcnRlZEF0IjoiMjAyMy0xMC0xOFQxNDowOToyNy45NTI4NTkxMloifQ%3D%3D - range: - - 0-0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: "{\n \"$schema\": \"https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#\",\n - \ \"contentVersion\": \"1.0.0.0\",\n \"metadata\": {\n \"_generator\": {\n - \ \"name\": \"bicep\",\n \"version\": \"0.8.9.13224\",\n \"templateHash\": - \"14979664264804385741\"\n }\n },\n \"parameters\": {\n \"location\": - {\n \"type\": \"string\",\n \"defaultValue\": 
\"[resourceGroup().location]\"\n - \ },\n \"subnetName\": {\n \"type\": \"string\"\n },\n \"ubuntuVmName\": - {\n \"type\": \"string\",\n \"defaultValue\": \"ubuntu-vm\"\n },\n - \ \"virtualNetworkId\": {\n \"type\": \"string\"\n },\n \"sshPublicKeyAdmin\": - {\n \"type\": \"string\"\n },\n \"imageName\": {\n \"type\": - \"string\"\n }\n },\n \"variables\": {\n \"imageResourceGroup\": \"[resourceGroup().name]\",\n - \ \"subscriptionId\": \"[subscription().subscriptionId]\",\n \"vmSizeSku\": - \"Standard_D2s_v3\"\n },\n \"resources\": [\n {\n \"type\": \"Microsoft.Network/networkInterfaces\",\n - \ \"apiVersion\": \"2021-05-01\",\n \"name\": \"[format('{0}_nic', - parameters('ubuntuVmName'))]\",\n \"location\": \"[parameters('location')]\",\n - \ \"properties\": {\n \"ipConfigurations\": [\n {\n \"name\": - \"ipconfig1\",\n \"properties\": {\n \"subnet\": {\n - \ \"id\": \"[format('{0}/subnets/{1}', parameters('virtualNetworkId'), - parameters('subnetName'))]\"\n },\n \"primary\": true,\n - \ \"privateIPAddressVersion\": \"IPv4\"\n }\n }\n - \ ]\n }\n },\n {\n \"type\": \"Microsoft.Compute/virtualMachines\",\n - \ \"apiVersion\": \"2021-07-01\",\n \"name\": \"[parameters('ubuntuVmName')]\",\n - \ \"location\": \"[parameters('location')]\",\n \"properties\": {\n - \ \"hardwareProfile\": {\n \"vmSize\": \"[variables('vmSizeSku')]\"\n - \ },\n \"storageProfile\": {\n \"imageReference\": {\n - \ \"id\": \"[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', - variables('subscriptionId'), variables('imageResourceGroup')), 'Microsoft.Compute/images', - parameters('imageName'))]\"\n },\n \"osDisk\": {\n \"osType\": - \"Linux\",\n \"name\": \"[format('{0}_disk', parameters('ubuntuVmName'))]\",\n - \ \"createOption\": \"FromImage\",\n \"caching\": \"ReadWrite\",\n - \ \"writeAcceleratorEnabled\": false,\n \"managedDisk\": - \"[json('{\\\"storageAccountType\\\": \\\"Premium_LRS\\\"}')]\",\n \"deleteOption\": - \"Delete\",\n \"diskSizeGB\": 30\n }\n },\n \"osProfile\": - {\n \"computerName\": \"[parameters('ubuntuVmName')]\",\n \"adminUsername\": - \"azureuser\",\n \"linuxConfiguration\": {\n \"disablePasswordAuthentication\": - true,\n \"ssh\": {\n \"publicKeys\": [\n {\n - \ \"path\": \"/home/azureuser/.ssh/authorized_keys\",\n \"keyData\": - \"[parameters('sshPublicKeyAdmin')]\"\n }\n ]\n - \ },\n \"provisionVMAgent\": true,\n \"patchSettings\": - {\n \"patchMode\": \"ImageDefault\",\n \"assessmentMode\": - \"ImageDefault\"\n }\n },\n \"secrets\": [],\n - \ \"allowExtensionOperations\": true\n },\n \"networkProfile\": - {\n \"networkInterfaces\": [\n {\n \"id\": - \"[resourceId('Microsoft.Network/networkInterfaces', format('{0}_nic', parameters('ubuntuVmName')))]\"\n - \ }\n ]\n }\n },\n \"dependsOn\": [\n \"[resourceId('Microsoft.Network/networkInterfaces', - format('{0}_nic', parameters('ubuntuVmName')))]\"\n ]\n }\n ]\n}" - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3591' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: 
https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/d7f4991b-9178-478e-91e7-b1cc43e2a9cb?_nouploadcache=false&_state=d_6r7IkzYO_5Y42pShNHfyVVvDaoi77zDk-uG5xJ1-R7Ik5hbWUiOiJ1YnVudHUtdm0tYXJtLXRlbXBsYXRlIiwiVVVJRCI6ImQ3ZjQ5OTFiLTkxNzgtNDc4ZS05MWU3LWIxY2M0M2UyYTljYiIsIk9mZnNldCI6MCwiU3RhcnRlZEF0IjoiMjAyMy0xMC0xOFQxNDowOToyNy45NTI4NTkxMloifQ%3D%3D&digest=sha256%3Ae71bf56543dc33dc8e550a0c574efe9a4875754a4ddf74347e448dec2462798b - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:28 GMT - docker-content-digest: - - sha256:e71bf56543dc33dc8e550a0c574efe9a4875754a4ddf74347e448dec2462798b - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-arm-template/blobs/sha256:e71bf56543dc33dc8e550a0c574efe9a4875754a4ddf74347e448dec2462798b - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/ - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": "pull"}, {"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '302' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:28 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-arm-template:pull,push" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-arm-template%3Apull%2Cpush - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:28 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - 
chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.3' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/ - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:28 GMT - docker-distribution-api-version: - - registry/2.0 - docker-upload-uuid: - - 6ebeb5c8-ee46-40b1-9883-7df57d243785 - location: - - /v2/ubuntu-vm-arm-template/blobs/uploads/6ebeb5c8-ee46-40b1-9883-7df57d243785?_nouploadcache=false&_state=7fG-NeCYbsCo1LSSZDDPAY7uvIypztBHBQYKaQ19a3t7Ik5hbWUiOiJ1YnVudHUtdm0tYXJtLXRlbXBsYXRlIiwiVVVJRCI6IjZlYmViNWM4LWVlNDYtNDBiMS05ODgzLTdkZjU3ZDI0Mzc4NSIsIk9mZnNldCI6MCwiU3RhcnRlZEF0IjoiMjAyMy0xMC0xOFQxNDowOToyOC42NjIxOTIxNzlaIn0%3D - range: - - 0-0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/blobs/uploads/6ebeb5c8-ee46-40b1-9883-7df57d243785?_nouploadcache=false&_state=7fG-NeCYbsCo1LSSZDDPAY7uvIypztBHBQYKaQ19a3t7Ik5hbWUiOiJ1YnVudHUtdm0tYXJtLXRlbXBsYXRlIiwiVVVJRCI6IjZlYmViNWM4LWVlNDYtNDBiMS05ODgzLTdkZjU3ZDI0Mzc4NSIsIk9mZnNldCI6MCwiU3RhcnRlZEF0IjoiMjAyMy0xMC0xOFQxNDowOToyOC42NjIxOTIxNzlaIn0%3D&digest=sha256%3Ae3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:28 GMT - docker-content-digest: - - sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-arm-template/blobs/sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: '{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", - "config": {"mediaType": "application/vnd.unknown.config.v1+json", "size": 0, - "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - "layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 3591, - "digest": "sha256:e71bf56543dc33dc8e550a0c574efe9a4875754a4ddf74347e448dec2462798b", - "annotations": {"org.opencontainers.image.title": "ubuntu_template.json"}}], - "annotations": {}}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - 
Content-Length: - - '504' - Content-Type: - - application/vnd.oci.image.manifest.v1+json - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/manifests/1.0.0 - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": "pull"}, {"Type": - "repository", "Name": "ubuntu-vm-arm-template", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '302' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:29 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-arm-template:pull,push" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-arm-template%3Apull%2Cpush - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:29 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.283333' - status: - code: 200 - message: OK -- request: - body: '{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", - "config": {"mediaType": "application/vnd.unknown.config.v1+json", "size": 0, - "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - "layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 3591, - "digest": "sha256:e71bf56543dc33dc8e550a0c574efe9a4875754a4ddf74347e448dec2462798b", - "annotations": {"org.opencontainers.image.title": "ubuntu_template.json"}}], - "annotations": {}}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '504' - Content-Type: - - application/vnd.oci.image.manifest.v1+json - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-arm-template/manifests/1.0.0 - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:09:29 GMT - docker-content-digest: - - 
sha256:8923fa544da97914212bc9173ec512741d331940e4a2c7b6fbad979657a5c507 - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-arm-template/manifests/sha256:8923fa544da97914212bc9173ec512741d331940e4a2c7b6fbad979657a5c507 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "162926653163819683"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "saArtifactStoreName": {"type": "string", "metadata": {"description": - "Name of an existing Storage Account-backed Artifact Store, deployed under the - publisher."}}, "nfName": {"type": "string", "metadata": {"description": "Name - of Network Function. Used predominantly as a prefix for other variable names"}}, - "nfDefinitionGroup": {"type": "string", "metadata": {"description": "Name of - an existing Network Function Definition Group"}}, "nfDefinitionVersion": {"type": - "string", "metadata": {"description": "The version of the NFDV you want to deploy, - in format A.B.C"}}, "vhdVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM VHD artifact, in format A-B-C. e.g. - 6-13-0"}}, "armTemplateVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM template artifact, in format A.B.C. - e.g. 6.13.0. 
If testing for development, you can use any numbers you like."}}}, - "variables": {"$fxv#0": {"$schema": "https://json-schema.org/draft-07/schema#", - "title": "DeployParametersSchema", "type": "object", "properties": {"location": - {"type": "string"}, "subnetName": {"type": "string"}, "ubuntuVmName": {"type": - "string"}, "virtualNetworkId": {"type": "string"}, "sshPublicKeyAdmin": {"type": - "string"}}}, "$fxv#1": {"imageName": "ubuntu-vmImage", "azureDeployLocation": - "{deployParameters.location}"}, "$fxv#2": {"location": "{deployParameters.location}", - "subnetName": "{deployParameters.subnetName}", "ubuntuVmName": "{deployParameters.ubuntuVmName}", - "virtualNetworkId": "{deployParameters.virtualNetworkId}", "sshPublicKeyAdmin": - "{deployParameters.sshPublicKeyAdmin}", "imageName": "ubuntu-vmImage"}}, "resources": - [{"type": "Microsoft.Hybridnetwork/publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''nfDefinitionGroup''), parameters(''nfDefinitionVersion''))]", "location": - "[parameters(''location'')]", "properties": {"versionState": "Preview", "deployParameters": - "[string(variables(''$fxv#0''))]", "networkFunctionType": "VirtualNetworkFunction", - "networkFunctionTemplate": {"nfviType": "AzureCore", "networkFunctionApplications": - [{"artifactType": "VhdImageFile", "name": "[format(''{0}Image'', parameters(''nfName''))]", - "dependsOnProfile": null, "artifactProfile": {"vhdArtifactProfile": {"vhdName": - "[format(''{0}-vhd'', parameters(''nfName''))]", "vhdVersion": "[parameters(''vhdVersion'')]"}, - "artifactStore": {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', - parameters(''publisherName''), parameters(''saArtifactStoreName''))]"}}, "deployParametersMappingRuleProfile": - {"vhdImageMappingRuleProfile": {"userConfiguration": "[string(variables(''$fxv#1''))]"}, - "applicationEnablement": "Unknown"}}, {"artifactType": "ArmTemplate", "name": - "[parameters(''nfName'')]", "dependsOnProfile": null, "artifactProfile": {"templateArtifactProfile": - {"templateName": "[format(''{0}-arm-template'', parameters(''nfName''))]", "templateVersion": - "[parameters(''armTemplateVersion'')]"}, "artifactStore": {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', - parameters(''publisherName''), parameters(''acrArtifactStoreName''))]"}}, "deployParametersMappingRuleProfile": - {"templateMappingRuleProfile": {"templateParameters": "[string(variables(''$fxv#2''))]"}, - "applicationEnablement": "Unknown"}}]}}}]}, "parameters": {"location": {"value": - "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "saArtifactStoreName": {"value": - "ubuntu-blob-store"}, "nfName": {"value": "ubuntu-vm"}, "nfDefinitionGroup": - {"value": "ubuntu-vm-nfdg"}, "nfDefinitionVersion": {"value": "1.0.0"}, "vhdVersion": - {"value": "1-0-0"}, "armTemplateVersion": {"value": "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '4493' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/validate?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638172", - "name": "AOSM_CLI_deployment_1697638172", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "162926653163819683", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "nfDefinitionGroup": {"type": "String", "value": "ubuntu-vm-nfdg"}, "nfDefinitionVersion": - {"type": "String", "value": "1.0.0"}, "vhdVersion": {"type": "String", "value": - "1-0-0"}, "armTemplateVersion": {"type": "String", "value": "1.0.0"}}, "mode": - "Incremental", "provisioningState": "Succeeded", "timestamp": "0001-01-01T00:00:00Z", - "duration": "PT0S", "correlationId": "20c9e46c-3cb0-44a7-957e-bbfbbad3c1a2", - "providers": [{"namespace": "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": - "publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "locations": ["uaenorth"]}]}], "dependencies": [], "validatedResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkfunctiondefinitiongroups/ubuntu-vm-nfdg/networkfunctiondefinitionversions/1.0.0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1577' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:34 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1198' - status: - code: 200 - message: OK -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "162926653163819683"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "saArtifactStoreName": {"type": "string", "metadata": {"description": - "Name of an existing Storage Account-backed Artifact Store, deployed under the - publisher."}}, "nfName": {"type": "string", "metadata": {"description": "Name - of Network Function. 
Used predominantly as a prefix for other variable names"}}, - "nfDefinitionGroup": {"type": "string", "metadata": {"description": "Name of - an existing Network Function Definition Group"}}, "nfDefinitionVersion": {"type": - "string", "metadata": {"description": "The version of the NFDV you want to deploy, - in format A.B.C"}}, "vhdVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM VHD artifact, in format A-B-C. e.g. - 6-13-0"}}, "armTemplateVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM template artifact, in format A.B.C. - e.g. 6.13.0. If testing for development, you can use any numbers you like."}}}, - "variables": {"$fxv#0": {"$schema": "https://json-schema.org/draft-07/schema#", - "title": "DeployParametersSchema", "type": "object", "properties": {"location": - {"type": "string"}, "subnetName": {"type": "string"}, "ubuntuVmName": {"type": - "string"}, "virtualNetworkId": {"type": "string"}, "sshPublicKeyAdmin": {"type": - "string"}}}, "$fxv#1": {"imageName": "ubuntu-vmImage", "azureDeployLocation": - "{deployParameters.location}"}, "$fxv#2": {"location": "{deployParameters.location}", - "subnetName": "{deployParameters.subnetName}", "ubuntuVmName": "{deployParameters.ubuntuVmName}", - "virtualNetworkId": "{deployParameters.virtualNetworkId}", "sshPublicKeyAdmin": - "{deployParameters.sshPublicKeyAdmin}", "imageName": "ubuntu-vmImage"}}, "resources": - [{"type": "Microsoft.Hybridnetwork/publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''nfDefinitionGroup''), parameters(''nfDefinitionVersion''))]", "location": - "[parameters(''location'')]", "properties": {"versionState": "Preview", "deployParameters": - "[string(variables(''$fxv#0''))]", "networkFunctionType": "VirtualNetworkFunction", - "networkFunctionTemplate": {"nfviType": "AzureCore", "networkFunctionApplications": - [{"artifactType": "VhdImageFile", "name": "[format(''{0}Image'', parameters(''nfName''))]", - "dependsOnProfile": null, "artifactProfile": {"vhdArtifactProfile": {"vhdName": - "[format(''{0}-vhd'', parameters(''nfName''))]", "vhdVersion": "[parameters(''vhdVersion'')]"}, - "artifactStore": {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', - parameters(''publisherName''), parameters(''saArtifactStoreName''))]"}}, "deployParametersMappingRuleProfile": - {"vhdImageMappingRuleProfile": {"userConfiguration": "[string(variables(''$fxv#1''))]"}, - "applicationEnablement": "Unknown"}}, {"artifactType": "ArmTemplate", "name": - "[parameters(''nfName'')]", "dependsOnProfile": null, "artifactProfile": {"templateArtifactProfile": - {"templateName": "[format(''{0}-arm-template'', parameters(''nfName''))]", "templateVersion": - "[parameters(''armTemplateVersion'')]"}, "artifactStore": {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', - parameters(''publisherName''), parameters(''acrArtifactStoreName''))]"}}, "deployParametersMappingRuleProfile": - {"templateMappingRuleProfile": {"templateParameters": "[string(variables(''$fxv#2''))]"}, - "applicationEnablement": "Unknown"}}]}}}]}, "parameters": {"location": {"value": - "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "saArtifactStoreName": {"value": - "ubuntu-blob-store"}, "nfName": {"value": "ubuntu-vm"}, 
"nfDefinitionGroup": - {"value": "ubuntu-vm-nfdg"}, "nfDefinitionVersion": {"value": "1.0.0"}, "vhdVersion": - {"value": "1-0-0"}, "armTemplateVersion": {"value": "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - Content-Length: - - '4493' - Content-Type: - - application/json - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638172", - "name": "AOSM_CLI_deployment_1697638172", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "162926653163819683", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "nfDefinitionGroup": {"type": "String", "value": "ubuntu-vm-nfdg"}, "nfDefinitionVersion": - {"type": "String", "value": "1.0.0"}, "vhdVersion": {"type": "String", "value": - "1-0-0"}, "armTemplateVersion": {"type": "String", "value": "1.0.0"}}, "mode": - "Incremental", "provisioningState": "Accepted", "timestamp": "2023-10-18T14:09:36.7230807Z", - "duration": "PT0.0004793S", "correlationId": "50e1db5b-ce5e-452b-9ebf-c2cde5d771ae", - "providers": [{"namespace": "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": - "publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "locations": ["uaenorth"]}]}], "dependencies": []}}' - headers: - azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638172/operationStatuses/08585039687102911025?api-version=2022-09-01 - cache-control: - - no-cache - content-length: - - '1302' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:37 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1198' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687102911025?api-version=2022-09-01 - response: - body: - string: '{"status": 
"Accepted"}' - headers: - cache-control: - - no-cache - content-length: - - '22' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:09:37 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687102911025?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:10:06 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687102911025?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:10:37 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687102911025?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:06 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: 
null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039687102911025?api-version=2022-09-01 - response: - body: - string: '{"status": "Succeeded"}' - headers: - cache-control: - - no-cache - content-length: - - '23' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:37 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd publish - Connection: - - keep-alive - ParameterSetName: - - -f --definition-type - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638172", - "name": "AOSM_CLI_deployment_1697638172", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "162926653163819683", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "saArtifactStoreName": {"type": "String", - "value": "ubuntu-blob-store"}, "nfName": {"type": "String", "value": "ubuntu-vm"}, - "nfDefinitionGroup": {"type": "String", "value": "ubuntu-vm-nfdg"}, "nfDefinitionVersion": - {"type": "String", "value": "1.0.0"}, "vhdVersion": {"type": "String", "value": - "1-0-0"}, "armTemplateVersion": {"type": "String", "value": "1.0.0"}}, "mode": - "Incremental", "provisioningState": "Succeeded", "timestamp": "2023-10-18T14:11:20.2791579Z", - "duration": "PT1M43.5565565S", "correlationId": "50e1db5b-ce5e-452b-9ebf-c2cde5d771ae", - "providers": [{"namespace": "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": - "publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "locations": ["uaenorth"]}]}], "dependencies": [], "outputResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkfunctiondefinitiongroups/ubuntu-vm-nfdg/networkfunctiondefinitionversions/1.0.0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1593' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:37 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - 
Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd build - Connection: - - keep-alive - ParameterSetName: - - -f --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkFunctionDefinitionGroups/ubuntu-vm-nfdg/networkFunctionDefinitionVersions/1.0.0?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkfunctiondefinitiongroups/ubuntu-vm-nfdg/networkfunctiondefinitionversions/1.0.0", - "name": "1.0.0", "type": "microsoft.hybridnetwork/publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:09:42.3859631Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:09:42.3859631Z"}, "properties": {"networkFunctionTemplate": - {"networkFunctionApplications": [{"artifactProfile": {"vhdArtifactProfile": - {"vhdName": "ubuntu-vm-vhd", "vhdVersion": "1-0-0"}, "artifactStore": {"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store"}}, - "deployParametersMappingRuleProfile": {"vhdImageMappingRuleProfile": {"userConfiguration": - "{\"imageName\":\"ubuntu-vmImage\",\"azureDeployLocation\":\"{deployParameters.location}\"}"}, - "applicationEnablement": "Unknown"}, "artifactType": "VhdImageFile", "dependsOnProfile": - null, "name": "ubuntu-vmImage"}, {"artifactProfile": {"templateArtifactProfile": - {"templateName": "ubuntu-vm-arm-template", "templateVersion": "1.0.0"}, "artifactStore": - {"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr"}}, - "deployParametersMappingRuleProfile": {"templateMappingRuleProfile": {"templateParameters": - "{\"location\":\"{deployParameters.location}\",\"subnetName\":\"{deployParameters.subnetName}\",\"ubuntuVmName\":\"{deployParameters.ubuntuVmName}\",\"virtualNetworkId\":\"{deployParameters.virtualNetworkId}\",\"sshPublicKeyAdmin\":\"{deployParameters.sshPublicKeyAdmin}\",\"imageName\":\"ubuntu-vmImage\"}"}, - "applicationEnablement": "Unknown"}, "artifactType": "ArmTemplate", "dependsOnProfile": - null, "name": "ubuntu-vm"}], "nfviType": "AzureCore"}, "versionState": "Preview", - "description": null, "deployParameters": 
"{\"$schema\":\"https://json-schema.org/draft-07/schema#\",\"title\":\"DeployParametersSchema\",\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\"},\"subnetName\":{\"type\":\"string\"},\"ubuntuVmName\":{\"type\":\"string\"},\"virtualNetworkId\":{\"type\":\"string\"},\"sshPublicKeyAdmin\":{\"type\":\"string\"}},\"required\":[\"location\",\"subnetName\",\"ubuntuVmName\",\"virtualNetworkId\",\"sshPublicKeyAdmin\"]}", - "networkFunctionType": "VirtualNetworkFunction", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '2856' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:40 GMT - etag: - - '"1c00f218-0000-3200-0000-652fe7560000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-2023-09-01"}' - headers: - cache-control: - - no-cache - content-length: - - '290' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:40 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-Publisher"}' - headers: - cache-control: - - no-cache - content-length: - - '288' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:40 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - 
chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: HEAD - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001?api-version=2022-09-01 - response: - body: - string: '' - headers: - cache-control: - - no-cache - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:11:39 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 204 - message: No Content -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001", - "name": "cli_test_vnf_nsd_000001", "type": "Microsoft.Resources/resourceGroups", - "location": "uaenorth", "tags": {"product": "azurecli", "cause": "automation", - "test": "test_vnf_nsd_publish_and_delete", "date": "2023-10-18T13:38:28Z", - "module": "aosm", "autoDelete": "true", "expiresOn": "2023-11-17T13:38:28.5214058Z"}, - "properties": {"provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '471' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:39 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "name": "automated-tests-ubuntuPublisher", "type": "microsoft.hybridnetwork/publishers", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:38:32.2024151Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T13:38:32.2024151Z"}, 
"identity": {"principalId": "a5b8c784-46ee-4a43-b5ec-4d3a1db603af", - "tenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47", "type": "SystemAssigned"}, - "properties": {"scope": "Private", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '760' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:40 GMT - etag: - - '"0700c334-0000-3200-0000-652fdffa0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "name": "ubuntu-acr", "type": "microsoft.hybridnetwork/publishers/artifactstores", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T13:40:13.0005011Z", "lastModifiedBy": - "b8ed041c-aa91-418e-8f47-20c70abc2de1", "lastModifiedByType": "Application", - "lastModifiedAt": "2023-10-18T14:10:27.826506Z"}, "properties": {"storeType": - "AzureContainerRegistry", "replicationStrategy": "SingleReplication", "managedResourceGroupConfiguration": - {"name": "ubuntu-acr-HostedResources-50EFD041", "location": "uaenorth"}, "provisioningState": - "Succeeded", "storageResourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/ubuntu-acr-HostedResources-50EFD041/providers/Microsoft.ContainerRegistry/registries/AutomatedTestsUbuntupublisherUbuntuAcrc4f3741041"}}' - headers: - cache-control: - - no-cache - content-length: - - '1049' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:41 GMT - etag: - - '"00003ef0-0000-3200-0000-652fe7540000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: '{"location": "uaenorth"}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '24' - Content-Type: - - application/json - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu?api-version=2023-09-01 - response: - body: - string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu", - "name": "ubuntu", "type": "microsoft.hybridnetwork/publishers/networkservicedesigngroups", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:11:42.2416686Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:11:42.2416686Z"}, "properties": {"description": null, "provisioningState": - "Accepted"}}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '649' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:51 GMT - etag: - - '"0200bd8a-0000-3200-0000-652fe7a70000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F", - "name": "f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu", - "status": "Accepted", "startTime": "2023-10-18T14:11:48.2835374Z"}' - headers: - cache-control: - - no-cache - content-length: - - '582' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:11:51 GMT - etag: - - '"00004815-0000-3200-0000-652fe7a40000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F", - "name": "f7470f64-f339-469a-8c67-2511ab04eee5*07B306C85FFAFCD22DF0677DA697E92BF9D641954995CBCE968C98ADC599FA3F", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu", - "status": "Succeeded", "startTime": "2023-10-18T14:11:48.2835374Z", "endTime": - "2023-10-18T14:11:57.0579934Z", "properties": null}' - headers: - cache-control: - - no-cache - content-length: - - '646' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:21 GMT - etag: - - '"00004915-0000-3200-0000-652fe7ad0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu", - "name": "ubuntu", "type": "microsoft.hybridnetwork/publishers/networkservicedesigngroups", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:11:42.2416686Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:11:42.2416686Z"}, "properties": {"description": null, "provisioningState": - "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '650' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:21 GMT - etag: - - '"0200c28a-0000-3200-0000-652fe7ad0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "ResourceNotFound", "message": "The Resource ''Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0'' - under resource group ''cli_test_vnf_nsd_000001'' was not found. For more details - please go to https://aka.ms/ARMResourceNotFoundFix"}}' - headers: - cache-control: - - no-cache - content-length: - - '346' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:22 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - gateway - status: - code: 404 - message: Not Found -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "9410313761093503784"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "acrManifestNames": {"type": "array", "metadata": {"description": - "Name of the manifest to deploy for the ACR-backed Artifact Store"}}, "armTemplateNames": - {"type": "array", "metadata": {"description": "The name under which to store - the ARM template"}}, "armTemplateVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM template artifact, in format A.B.C. - e.g. 6.13.0. 
If testing for development, you can use any numbers you like."}}}, - "resources": [{"copy": {"name": "acrArtifactManifests", "count": "[length(parameters(''armTemplateNames''))]"}, - "type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''acrArtifactStoreName''), parameters(''acrManifestNames'')[copyIndex()])]", - "location": "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[parameters(''armTemplateNames'')[copyIndex()]]", "artifactType": "ArmTemplate", - "artifactVersion": "[parameters(''armTemplateVersion'')]"}]}}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "acrManifestNames": {"value": - ["ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"]}, "armTemplateNames": {"value": ["ubuntu-vm-nfdg_nf_artifact"]}, - "armTemplateVersion": {"value": "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '2070' - Content-Type: - - application/json - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/validate?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638345", - "name": "AOSM_CLI_deployment_1697638345", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "9410313761093503784", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "acrManifestNames": {"type": "Array", "value": - ["ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"]}, "armTemplateNames": {"type": "Array", - "value": ["ubuntu-vm-nfdg_nf_artifact"]}, "armTemplateVersion": {"type": "String", - "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": "Succeeded", - "timestamp": "0001-01-01T00:00:00Z", "duration": "PT0S", "correlationId": - "e403618b-544f-4579-be8c-95f5b315cb43", "providers": [{"namespace": "Microsoft.Hybridnetwork", - "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - "locations": ["uaenorth"]}]}], "dependencies": [], "validatedResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1403' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:27 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - 
x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 200 - message: OK -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "9410313761093503784"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "acrManifestNames": {"type": "array", "metadata": {"description": - "Name of the manifest to deploy for the ACR-backed Artifact Store"}}, "armTemplateNames": - {"type": "array", "metadata": {"description": "The name under which to store - the ARM template"}}, "armTemplateVersion": {"type": "string", "metadata": {"description": - "The version that you want to name the NFM template artifact, in format A.B.C. - e.g. 6.13.0. If testing for development, you can use any numbers you like."}}}, - "resources": [{"copy": {"name": "acrArtifactManifests", "count": "[length(parameters(''armTemplateNames''))]"}, - "type": "Microsoft.Hybridnetwork/publishers/artifactStores/artifactManifests", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''acrArtifactStoreName''), parameters(''acrManifestNames'')[copyIndex()])]", - "location": "[parameters(''location'')]", "properties": {"artifacts": [{"artifactName": - "[parameters(''armTemplateNames'')[copyIndex()]]", "artifactType": "ArmTemplate", - "artifactVersion": "[parameters(''armTemplateVersion'')]"}]}}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "acrManifestNames": {"value": - ["ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"]}, "armTemplateNames": {"value": ["ubuntu-vm-nfdg_nf_artifact"]}, - "armTemplateVersion": {"value": "1.0.0"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '2070' - Content-Type: - - application/json - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638345", - "name": "AOSM_CLI_deployment_1697638345", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "9410313761093503784", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "acrManifestNames": {"type": "Array", "value": - ["ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"]}, "armTemplateNames": {"type": 
"Array", - "value": ["ubuntu-vm-nfdg_nf_artifact"]}, "armTemplateVersion": {"type": "String", - "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": "Accepted", - "timestamp": "2023-10-18T14:12:30.6616781Z", "duration": "PT0.0008635S", "correlationId": - "efcc0799-2a2d-4923-b43b-dbb4cb17d8db", "providers": [{"namespace": "Microsoft.Hybridnetwork", - "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - "locations": ["uaenorth"]}]}], "dependencies": []}}' - headers: - azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638345/operationStatuses/08585039685363593619?api-version=2022-09-01 - cache-control: - - no-cache - content-length: - - '1134' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:31 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039685363593619?api-version=2022-09-01 - response: - body: - string: '{"status": "Accepted"}' - headers: - cache-control: - - no-cache - content-length: - - '22' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:12:31 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039685363593619?api-version=2022-09-01 - response: - body: - string: '{"status": "Succeeded"}' - headers: - cache-control: - - no-cache - content-length: - - '23' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:20:45 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 
(Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697638345", - "name": "AOSM_CLI_deployment_1697638345", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "9410313761093503784", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "acrManifestNames": {"type": "Array", "value": - ["ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"]}, "armTemplateNames": {"type": "Array", - "value": ["ubuntu-vm-nfdg_nf_artifact"]}, "armTemplateVersion": {"type": "String", - "value": "1.0.0"}}, "mode": "Incremental", "provisioningState": "Succeeded", - "timestamp": "2023-10-18T14:13:02.9663835Z", "duration": "PT32.3055689S", - "correlationId": "efcc0799-2a2d-4923-b43b-dbb4cb17d8db", "providers": [{"namespace": - "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": "publishers/artifactStores/artifactManifests", - "locations": ["uaenorth"]}]}], "dependencies": [], "outputResources": [{"id": - "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '1417' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:20:46 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0", - "name": "ubuntu-vm-nfdg-nf-acr-manifest-1-0-0", "type": "microsoft.hybridnetwork/publishers/artifactstores/artifactmanifests", - "location": "uaenorth", "systemData": {"createdBy": "achurchard@microsoft.com", - "createdByType": "User", "createdAt": "2023-10-18T14:12:36.4799694Z", "lastModifiedBy": - "achurchard@microsoft.com", "lastModifiedByType": "User", "lastModifiedAt": - "2023-10-18T14:12:36.4799694Z"}, "properties": {"artifacts": [{"artifactName": 
- "ubuntu-vm-nfdg_nf_artifact", "artifactType": "ArmTemplate", "artifactVersion": - "1.0.0"}], "artifactManifestState": "Uploading", "provisioningState": "Succeeded"}}' - headers: - cache-control: - - no-cache - content-length: - - '872' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:42 GMT - etag: - - '"0400e19a-0000-3200-0000-652fe7e90000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-providerhub-traffic: - - 'True' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0/listCredential?api-version=2023-09-01 - response: - body: - string: '{"username": "ubuntu-vm-nfdg-nf-acr-manifest-1-0-0", "acrToken": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", - "acrServerUrl": "https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io", - "repositories": ["ubuntu-vm-nfdg_nf_artifact"], "expiry": "2023-10-19T14:25:46.7261147+00:00", - "credentialType": "AzureContainerRegistryScopedToken"}' - headers: - cache-control: - - no-cache - content-length: - - '357' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:47 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/ - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "pull"}, {"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '310' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:53 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer 
realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-nfdg_nf_artifact:push,pull" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-nfdg_nf_artifact%3Apush%2Cpull - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:53 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.316667' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/ - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:25:53 GMT - docker-distribution-api-version: - - registry/2.0 - docker-upload-uuid: - - 55b370ff-243b-456a-b07b-14d627dba105 - location: - - /v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/55b370ff-243b-456a-b07b-14d627dba105?_nouploadcache=false&_state=JtxCu3YaSw78XlKjNAyGp2iDHlDDgiIxWJl7uCnAS0l7Ik5hbWUiOiJ1YnVudHUtdm0tbmZkZ19uZl9hcnRpZmFjdCIsIlVVSUQiOiI1NWIzNzBmZi0yNDNiLTQ1NmEtYjA3Yi0xNGQ2MjdkYmExMDUiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMjMtMTAtMThUMTQ6MjU6NTMuNDc0NDMzMzAxWiJ9 - range: - - 0-0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: "{\n \"$schema\": \"https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#\",\n - \ \"contentVersion\": \"1.0.0.0\",\n \"metadata\": {\n \"_generator\": - {\n \"name\": \"bicep\",\n \"version\": \"0.15.31.15270\",\n - \ \"templateHash\": \"224172526033291650\"\n }\n },\n \"parameters\": - {\n \"publisherName\": {\n \"type\": \"string\",\n \"defaultValue\": - \"automated-tests-ubuntuPublisher\",\n \"metadata\": {\n \"description\": - \"Publisher where the NFD is published\"\n }\n },\n \"publisherResourceGroup\": - {\n \"type\": \"string\",\n \"defaultValue\": \"cli_test_vnf_nsd_qme7qkr2glsiiosfl3fbcm6yr4dvelszx6icqxx4yyuwe4eary6pt4rb7n\",\n - \ \"metadata\": {\n \"description\": \"Resource group - where the NFD publisher exists\"\n }\n },\n \"networkFunctionDefinitionGroupName\": - {\n \"type\": \"string\",\n \"defaultValue\": \"ubuntu-vm-nfdg\",\n - \ \"metadata\": {\n \"description\": \"NFD Group name - for the Network Function\"\n }\n 
},\n \"ubuntu_vm_nfdg_nfd_version\": - {\n \"type\": \"string\",\n \"metadata\": {\n \"description\": - \"NFD version\"\n }\n },\n \"managedIdentity\": {\n - \ \"type\": \"string\",\n \"metadata\": {\n \"description\": - \"The managed identity that should be used to create the NF.\"\n }\n - \ },\n \"location\": {\n \"type\": \"string\",\n \"defaultValue\": - \"uaenorth\"\n },\n \"nfviType\": {\n \"type\": \"string\",\n - \ \"defaultValue\": \"AzureCore\"\n },\n \"resourceGroupId\": - {\n \"type\": \"string\",\n \"defaultValue\": \"[resourceGroup().id]\"\n - \ },\n \"deploymentParametersObject\": {\n \"type\": - \"secureObject\"\n }\n },\n \"variables\": {\n \"deploymentParameters\": - \"[parameters('deploymentParametersObject').deploymentParameters]\",\n \"identityObject\": - \"[if(equals(parameters('managedIdentity'), ''), createObject('type', 'SystemAssigned'), - createObject('type', 'UserAssigned', 'userAssignedIdentities', createObject(format('{0}', - parameters('managedIdentity')), createObject())))]\"\n },\n \"resources\": - [\n {\n \"copy\": {\n \"name\": \"nf_resource\",\n - \ \"count\": \"[length(variables('deploymentParameters'))]\"\n - \ },\n \"type\": \"Microsoft.HybridNetwork/networkFunctions\",\n - \ \"apiVersion\": \"2023-09-01\",\n \"name\": \"[format('ubuntu-vm-nfdg{0}', - copyIndex())]\",\n \"location\": \"[parameters('location')]\",\n - \ \"identity\": \"[variables('identityObject')]\",\n \"properties\": - {\n \"networkFunctionDefinitionVersionResourceReference\": {\n - \ \"id\": \"[extensionResourceId(format('/subscriptions/{0}/resourceGroups/{1}', - subscription().subscriptionId, parameters('publisherResourceGroup')), 'Microsoft.Hybridnetwork/publishers/networkfunctiondefinitiongroups/networkfunctiondefinitionversions', - parameters('publisherName'), parameters('networkFunctionDefinitionGroupName'), - parameters('ubuntu_vm_nfdg_nfd_version'))]\",\n \"idType\": - \"Open\"\n },\n \"nfviType\": \"[parameters('nfviType')]\",\n - \ \"nfviId\": \"[parameters('resourceGroupId')]\",\n \"allowSoftwareUpdate\": - true,\n \"configurationType\": \"Secret\",\n \"secretDeploymentValues\": - \"[string(variables('deploymentParameters')[copyIndex()])]\"\n }\n - \ }\n ]\n}" - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '3634' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/55b370ff-243b-456a-b07b-14d627dba105?_nouploadcache=false&_state=JtxCu3YaSw78XlKjNAyGp2iDHlDDgiIxWJl7uCnAS0l7Ik5hbWUiOiJ1YnVudHUtdm0tbmZkZ19uZl9hcnRpZmFjdCIsIlVVSUQiOiI1NWIzNzBmZi0yNDNiLTQ1NmEtYjA3Yi0xNGQ2MjdkYmExMDUiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMjMtMTAtMThUMTQ6MjU6NTMuNDc0NDMzMzAxWiJ9&digest=sha256%3A375e1c0899366f1a4c35c2151ac7c1431ad52ad49d1b4c5b5ce8a2f1add7e36e - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:25:53 GMT - docker-content-digest: - - sha256:375e1c0899366f1a4c35c2151ac7c1431ad52ad49d1b4c5b5ce8a2f1add7e36e - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-nfdg_nf_artifact/blobs/sha256:375e1c0899366f1a4c35c2151ac7c1431ad52ad49d1b4c5b5ce8a2f1add7e36e - server: - - openresty - strict-transport-security: - - max-age=31536000; 
includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/ - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "pull"}, {"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '310' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:53 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-nfdg_nf_artifact:pull,push" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-nfdg_nf_artifact%3Apull%2Cpush - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:54 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.3' - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: POST - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/ - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:25:54 GMT - docker-distribution-api-version: - - registry/2.0 - docker-upload-uuid: - - d22110b6-d772-48e3-bf3b-04e4ce643eae - location: - - 
/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/d22110b6-d772-48e3-bf3b-04e4ce643eae?_nouploadcache=false&_state=qCFRAnhotjGMxuG5YCwxEDgF1NablZcPQrukI__ajwl7Ik5hbWUiOiJ1YnVudHUtdm0tbmZkZ19uZl9hcnRpZmFjdCIsIlVVSUQiOiJkMjIxMTBiNi1kNzcyLTQ4ZTMtYmYzYi0wNGU0Y2U2NDNlYWUiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMjMtMTAtMThUMTQ6MjU6NTQuMjA0NzQxMjk2WiJ9 - range: - - 0-0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '0' - Content-Type: - - application/octet-stream - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/blobs/uploads/d22110b6-d772-48e3-bf3b-04e4ce643eae?_nouploadcache=false&_state=qCFRAnhotjGMxuG5YCwxEDgF1NablZcPQrukI__ajwl7Ik5hbWUiOiJ1YnVudHUtdm0tbmZkZ19uZl9hcnRpZmFjdCIsIlVVSUQiOiJkMjIxMTBiNi1kNzcyLTQ4ZTMtYmYzYi0wNGU0Y2U2NDNlYWUiLCJPZmZzZXQiOjAsIlN0YXJ0ZWRBdCI6IjIwMjMtMTAtMThUMTQ6MjU6NTQuMjA0NzQxMjk2WiJ9&digest=sha256%3Ae3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:25:54 GMT - docker-content-digest: - - sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-nfdg_nf_artifact/blobs/sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: '{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", - "config": {"mediaType": "application/vnd.unknown.config.v1+json", "size": 0, - "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - "layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 3634, - "digest": "sha256:375e1c0899366f1a4c35c2151ac7c1431ad52ad49d1b4c5b5ce8a2f1add7e36e", - "annotations": {"org.opencontainers.image.title": "nf_definition.json"}}], "annotations": - {}}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '502' - Content-Type: - - application/vnd.oci.image.manifest.v1+json - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/manifests/1.0.0 - response: - body: - string: '{"errors": [{"code": "UNAUTHORIZED", "message": "authentication required, - visit https://aka.ms/acr/authorization for more information.", "detail": [{"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "pull"}, {"Type": - "repository", "Name": "ubuntu-vm-nfdg_nf_artifact", "Action": "push"}]}]}' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '310' - content-type: - - application/json; charset=utf-8 
- date: - - Wed, 18 Oct 2023 14:25:54 GMT - docker-distribution-api-version: - - registry/2.0 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - www-authenticate: - - Bearer realm="https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token",service="automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io",scope="repository:ubuntu-vm-nfdg_nf_artifact:pull,push" - x-content-type-options: - - nosniff - status: - code: 401 - message: Unauthorized -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Service: - - automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io - User-Agent: - - oras-py - method: GET - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/oauth2/token?service=automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io&scope=repository%3Aubuntu-vm-nfdg_nf_artifact%3Apull%2Cpush - response: - body: - string: '{"access_token": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}' - headers: - connection: - - keep-alive - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:25:54 GMT - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - x-ms-ratelimit-remaining-calls-per-second: - - '333.283333' - status: - code: 200 - message: OK -- request: - body: '{"schemaVersion": 2, "mediaType": "application/vnd.oci.image.manifest.v1+json", - "config": {"mediaType": "application/vnd.unknown.config.v1+json", "size": 0, - "digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - "layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar", "size": 3634, - "digest": "sha256:375e1c0899366f1a4c35c2151ac7c1431ad52ad49d1b4c5b5ce8a2f1add7e36e", - "annotations": {"org.opencontainers.image.title": "nf_definition.json"}}], "annotations": - {}}' - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '502' - Content-Type: - - application/vnd.oci.image.manifest.v1+json - User-Agent: - - python-requests/2.26.0 - method: PUT - uri: https://automatedtestsubuntupublisherubuntuacrc4f3741041.azurecr.io/v2/ubuntu-vm-nfdg_nf_artifact/manifests/1.0.0 - response: - body: - string: '' - headers: - access-control-expose-headers: - - Docker-Content-Digest - - WWW-Authenticate - - Link - - X-Ms-Correlation-Request-Id - connection: - - keep-alive - content-length: - - '0' - date: - - Wed, 18 Oct 2023 14:26:00 GMT - docker-content-digest: - - sha256:2011ec987725418a15590818992a2e016e8b2a9feef2bd1c9481dce060e95180 - docker-distribution-api-version: - - registry/2.0 - location: - - /v2/ubuntu-vm-nfdg_nf_artifact/manifests/sha256:2011ec987725418a15590818992a2e016e8b2a9feef2bd1c9481dce060e95180 - server: - - openresty - strict-transport-security: - - max-age=31536000; includeSubDomains - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 201 - message: Created -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "14831763147023388379"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name 
of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "nsDesignGroup": {"type": "string", "metadata": {"description": - "Name of an existing Network Service Design Group"}}, "nsDesignVersion": {"type": - "string", "metadata": {"description": "The version of the NSDV you want to create, - in format A.B.C"}}, "nfviSiteName": {"type": "string", "defaultValue": "ubuntu_NFVI", - "metadata": {"description": "Name of the nfvi site"}}}, "variables": {"$fxv#0": - {"$schema": "https://json-schema.org/draft-07/schema#", "title": "ubuntu_ConfigGroupSchema", - "type": "object", "properties": {"ubuntu-vm-nfdg": {"type": "object", "properties": - {"deploymentParameters": {"type": "object", "properties": {"location": {"type": - "string"}, "subnetName": {"type": "string"}, "ubuntuVmName": {"type": "string"}, - "virtualNetworkId": {"type": "string"}, "sshPublicKeyAdmin": {"type": "string"}}}, - "ubuntu_vm_nfdg_nfd_version": {"type": "string", "description": "The version - of the ubuntu-vm-nfdg NFD to use. This version must be compatible with (have - the same parameters exposed as) ubuntu-vm-nfdg."}}, "required": ["deploymentParameters", - "ubuntu_vm_nfdg_nfd_version"]}, "managedIdentity": {"type": "string", "description": - "The managed identity to use to deploy NFs within this SNS. This should be - of the form ''/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. If - you wish to use a system assigned identity, set this to a blank string."}}, - "required": ["ubuntu-vm-nfdg", "managedIdentity"]}, "$fxv#1": {"deploymentParametersObject": - {"deploymentParameters": ["{configurationparameters(''ubuntu_ConfigGroupSchema'').ubuntu-vm-nfdg.deploymentParameters}"]}, - "ubuntu_vm_nfdg_nfd_version": "{configurationparameters(''ubuntu_ConfigGroupSchema'').ubuntu-vm-nfdg.ubuntu_vm_nfdg_nfd_version}", - "managedIdentity": "{configurationparameters(''ubuntu_ConfigGroupSchema'').managedIdentity}"}}, - "resources": [{"type": "Microsoft.Hybridnetwork/publishers/configurationGroupSchemas", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}'', parameters(''publisherName''), - ''ubuntu_ConfigGroupSchema'')]", "location": "[parameters(''location'')]", "properties": - {"schemaDefinition": "[string(variables(''$fxv#0''))]"}}, {"type": "Microsoft.Hybridnetwork/publishers/networkservicedesigngroups/networkservicedesignversions", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''nsDesignGroup''), parameters(''nsDesignVersion''))]", "location": - "[parameters(''location'')]", "properties": {"description": "Plain ubuntu VM", - "versionState": "Preview", "configurationGroupSchemaReferences": {"ubuntu_ConfigGroupSchema": - {"id": "[resourceId(''Microsoft.Hybridnetwork/publishers/configurationGroupSchemas'', - parameters(''publisherName''), ''ubuntu_ConfigGroupSchema'')]"}}, "nfvisFromSite": - {"nfvi1": {"name": "[parameters(''nfviSiteName'')]", "type": "AzureCore"}}, - "resourceElementTemplates": [{"name": "ubuntu-vm-nfdg_nf_artifact_resource_element", - "type": "NetworkFunctionDefinition", "configuration": {"artifactProfile": {"artifactStoreReference": - {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', parameters(''publisherName''), - 
parameters(''acrArtifactStoreName''))]"}, "artifactName": "ubuntu-vm-nfdg_nf_artifact", - "artifactVersion": "1.0.0"}, "templateType": "ArmTemplate", "parameterValues": - "[string(variables(''$fxv#1''))]"}, "dependsOnProfile": {"installDependsOn": - [], "uninstallDependsOn": [], "updateDependsOn": []}}]}, "dependsOn": ["[resourceId(''Microsoft.Hybridnetwork/publishers/configurationGroupSchemas'', - parameters(''publisherName''), ''ubuntu_ConfigGroupSchema'')]"]}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "nsDesignGroup": {"value": - "ubuntu"}, "nsDesignVersion": {"value": "1.0.0"}, "nfviSiteName": {"value": - "ubuntu_NFVI"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '4560' - Content-Type: - - application/json - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: POST - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/validate?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697639164", - "name": "AOSM_CLI_deployment_1697639164", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "14831763147023388379", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "nsDesignGroup": {"type": "String", "value": - "ubuntu"}, "nsDesignVersion": {"type": "String", "value": "1.0.0"}, "nfviSiteName": - {"type": "String", "value": "ubuntu_NFVI"}}, "mode": "Incremental", "provisioningState": - "Succeeded", "timestamp": "0001-01-01T00:00:00Z", "duration": "PT0S", "correlationId": - "fc094be4-8edd-42e9-aafd-5e4049d17f6c", "providers": [{"namespace": "Microsoft.Hybridnetwork", - "resourceTypes": [{"resourceType": "publishers/configurationGroupSchemas", - "locations": ["uaenorth"]}, {"resourceType": "publishers/networkservicedesigngroups/networkservicedesignversions", - "locations": ["uaenorth"]}]}], "dependencies": [{"dependsOn": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/configurationGroupSchemas/ubuntu_ConfigGroupSchema", - "resourceType": "Microsoft.Hybridnetwork/publishers/configurationGroupSchemas", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu_ConfigGroupSchema"}], - "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkservicedesigngroups/ubuntu/networkservicedesignversions/1.0.0", - "resourceType": "Microsoft.Hybridnetwork/publishers/networkservicedesigngroups/networkservicedesignversions", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu/1.0.0"}], "validatedResources": - [{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/configurationGroupSchemas/ubuntu_ConfigGroupSchema"}, - {"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkservicedesigngroups/ubuntu/networkservicedesignversions/1.0.0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '2494' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:26:06 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 200 - message: OK -- request: - body: '{"properties": {"template": {"$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", - "contentVersion": "1.0.0.0", "metadata": {"_generator": {"name": "bicep", "version": - "0.15.31.15270", "templateHash": "14831763147023388379"}}, "parameters": {"location": - {"type": "string"}, "publisherName": {"type": "string", "metadata": {"description": - "Name of an existing publisher, expected to be in the resource group where you - deploy the template"}}, "acrArtifactStoreName": {"type": "string", "metadata": - {"description": "Name of an existing ACR-backed Artifact Store, deployed under - the publisher."}}, "nsDesignGroup": {"type": "string", "metadata": {"description": - "Name of an existing Network Service Design Group"}}, "nsDesignVersion": {"type": - "string", "metadata": {"description": "The version of the NSDV you want to create, - in format A.B.C"}}, "nfviSiteName": {"type": "string", "defaultValue": "ubuntu_NFVI", - "metadata": {"description": "Name of the nfvi site"}}}, "variables": {"$fxv#0": - {"$schema": "https://json-schema.org/draft-07/schema#", "title": "ubuntu_ConfigGroupSchema", - "type": "object", "properties": {"ubuntu-vm-nfdg": {"type": "object", "properties": - {"deploymentParameters": {"type": "object", "properties": {"location": {"type": - "string"}, "subnetName": {"type": "string"}, "ubuntuVmName": {"type": "string"}, - "virtualNetworkId": {"type": "string"}, "sshPublicKeyAdmin": {"type": "string"}}}, - "ubuntu_vm_nfdg_nfd_version": {"type": "string", "description": "The version - of the ubuntu-vm-nfdg NFD to use. This version must be compatible with (have - the same parameters exposed as) ubuntu-vm-nfdg."}}, "required": ["deploymentParameters", - "ubuntu_vm_nfdg_nfd_version"]}, "managedIdentity": {"type": "string", "description": - "The managed identity to use to deploy NFs within this SNS. This should be - of the form ''/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}. 
If - you wish to use a system assigned identity, set this to a blank string."}}, - "required": ["ubuntu-vm-nfdg", "managedIdentity"]}, "$fxv#1": {"deploymentParametersObject": - {"deploymentParameters": ["{configurationparameters(''ubuntu_ConfigGroupSchema'').ubuntu-vm-nfdg.deploymentParameters}"]}, - "ubuntu_vm_nfdg_nfd_version": "{configurationparameters(''ubuntu_ConfigGroupSchema'').ubuntu-vm-nfdg.ubuntu_vm_nfdg_nfd_version}", - "managedIdentity": "{configurationparameters(''ubuntu_ConfigGroupSchema'').managedIdentity}"}}, - "resources": [{"type": "Microsoft.Hybridnetwork/publishers/configurationGroupSchemas", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}'', parameters(''publisherName''), - ''ubuntu_ConfigGroupSchema'')]", "location": "[parameters(''location'')]", "properties": - {"schemaDefinition": "[string(variables(''$fxv#0''))]"}}, {"type": "Microsoft.Hybridnetwork/publishers/networkservicedesigngroups/networkservicedesignversions", - "apiVersion": "2023-09-01", "name": "[format(''{0}/{1}/{2}'', parameters(''publisherName''), - parameters(''nsDesignGroup''), parameters(''nsDesignVersion''))]", "location": - "[parameters(''location'')]", "properties": {"description": "Plain ubuntu VM", - "versionState": "Preview", "configurationGroupSchemaReferences": {"ubuntu_ConfigGroupSchema": - {"id": "[resourceId(''Microsoft.Hybridnetwork/publishers/configurationGroupSchemas'', - parameters(''publisherName''), ''ubuntu_ConfigGroupSchema'')]"}}, "nfvisFromSite": - {"nfvi1": {"name": "[parameters(''nfviSiteName'')]", "type": "AzureCore"}}, - "resourceElementTemplates": [{"name": "ubuntu-vm-nfdg_nf_artifact_resource_element", - "type": "NetworkFunctionDefinition", "configuration": {"artifactProfile": {"artifactStoreReference": - {"id": "[resourceId(''Microsoft.HybridNetwork/publishers/artifactStores'', parameters(''publisherName''), - parameters(''acrArtifactStoreName''))]"}, "artifactName": "ubuntu-vm-nfdg_nf_artifact", - "artifactVersion": "1.0.0"}, "templateType": "ArmTemplate", "parameterValues": - "[string(variables(''$fxv#1''))]"}, "dependsOnProfile": {"installDependsOn": - [], "uninstallDependsOn": [], "updateDependsOn": []}}]}, "dependsOn": ["[resourceId(''Microsoft.Hybridnetwork/publishers/configurationGroupSchemas'', - parameters(''publisherName''), ''ubuntu_ConfigGroupSchema'')]"]}]}, "parameters": - {"location": {"value": "uaenorth"}, "publisherName": {"value": "automated-tests-ubuntuPublisher"}, - "acrArtifactStoreName": {"value": "ubuntu-acr"}, "nsDesignGroup": {"value": - "ubuntu"}, "nsDesignVersion": {"value": "1.0.0"}, "nfviSiteName": {"value": - "ubuntu_NFVI"}}, "mode": "Incremental"}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - Content-Length: - - '4560' - Content-Type: - - application/json - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: PUT - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697639164", - "name": "AOSM_CLI_deployment_1697639164", "type": 
"Microsoft.Resources/deployments", - "properties": {"templateHash": "14831763147023388379", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "nsDesignGroup": {"type": "String", "value": - "ubuntu"}, "nsDesignVersion": {"type": "String", "value": "1.0.0"}, "nfviSiteName": - {"type": "String", "value": "ubuntu_NFVI"}}, "mode": "Incremental", "provisioningState": - "Accepted", "timestamp": "2023-10-18T14:26:09.8001Z", "duration": "PT0.0006866S", - "correlationId": "68d53d12-c962-4078-93b5-7c791983f5a4", "providers": [{"namespace": - "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": "publishers/configurationGroupSchemas", - "locations": ["uaenorth"]}, {"resourceType": "publishers/networkservicedesigngroups/networkservicedesignversions", - "locations": ["uaenorth"]}]}], "dependencies": [{"dependsOn": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/configurationGroupSchemas/ubuntu_ConfigGroupSchema", - "resourceType": "Microsoft.Hybridnetwork/publishers/configurationGroupSchemas", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu_ConfigGroupSchema"}], - "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkservicedesigngroups/ubuntu/networkservicedesignversions/1.0.0", - "resourceType": "Microsoft.Hybridnetwork/publishers/networkservicedesigngroups/networkservicedesignversions", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu/1.0.0"}]}}' - headers: - azure-asyncoperation: - - https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697639164/operationStatuses/08585039677173127677?api-version=2022-09-01 - cache-control: - - no-cache - content-length: - - '2004' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:26:10 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-ratelimit-remaining-subscription-writes: - - '1199' - status: - code: 201 - message: Created -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039677173127677?api-version=2022-09-01 - response: - body: - string: '{"status": "Accepted"}' - headers: - cache-control: - - no-cache - content-length: - - '22' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:26:10 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - 
body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039677173127677?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:26:40 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039677173127677?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:27:11 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039677173127677?api-version=2022-09-01 - response: - body: - string: '{"status": "Running"}' - headers: - cache-control: - - no-cache - content-length: - - '21' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:27:40 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment/operationStatuses/08585039677173127677?api-version=2022-09-01 - response: - body: - string: '{"status": "Succeeded"}' - headers: - cache-control: - - no-cache - content-length: - - '23' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:10 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd publish - Connection: - - keep-alive - ParameterSetName: - - -f - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/mock-deployment?api-version=2022-09-01 - response: - body: - string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Resources/deployments/AOSM_CLI_deployment_1697639164", - "name": "AOSM_CLI_deployment_1697639164", "type": "Microsoft.Resources/deployments", - "properties": {"templateHash": "14831763147023388379", "parameters": {"location": - {"type": "String", "value": "uaenorth"}, "publisherName": {"type": "String", - "value": "automated-tests-ubuntuPublisher"}, "acrArtifactStoreName": {"type": - "String", "value": "ubuntu-acr"}, "nsDesignGroup": {"type": "String", "value": - "ubuntu"}, "nsDesignVersion": {"type": "String", "value": "1.0.0"}, "nfviSiteName": - {"type": "String", "value": "ubuntu_NFVI"}}, "mode": "Incremental", "provisioningState": - "Succeeded", "timestamp": "2023-10-18T14:27:50.8828306Z", "duration": "PT1M41.0834172S", - "correlationId": "68d53d12-c962-4078-93b5-7c791983f5a4", "providers": [{"namespace": - "Microsoft.Hybridnetwork", "resourceTypes": [{"resourceType": "publishers/configurationGroupSchemas", - "locations": ["uaenorth"]}, {"resourceType": "publishers/networkservicedesigngroups/networkservicedesignversions", - "locations": ["uaenorth"]}]}], "dependencies": [{"dependsOn": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/configurationGroupSchemas/ubuntu_ConfigGroupSchema", - "resourceType": "Microsoft.Hybridnetwork/publishers/configurationGroupSchemas", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu_ConfigGroupSchema"}], - "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkservicedesigngroups/ubuntu/networkservicedesignversions/1.0.0", - "resourceType": "Microsoft.Hybridnetwork/publishers/networkservicedesigngroups/networkservicedesignversions", - "resourceName": "automated-tests-ubuntuPublisher/ubuntu/1.0.0"}], "outputResources": - [{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/configurationGroupSchemas/ubuntu_ConfigGroupSchema"}, - {"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.Hybridnetwork/publishers/automated-tests-ubuntuPublisher/networkservicedesigngroups/ubuntu/networkservicedesignversions/1.0.0"}]}}' - headers: - cache-control: - - no-cache - content-length: - - '2510' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:11 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-2023-09-01", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-2023-09-01"}' - headers: - cache-control: - - no-cache - content-length: - - '290' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:11 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json, text/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-azure-mgmt-resource/23.1.0b2 Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher?api-version=2021-07-01 - response: - body: - string: '{"properties": {"state": "Registered"}, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Features/providers/Microsoft.HybridNetwork/features/Allow-Publisher", - "type": "Microsoft.Features/providers/features", "name": "Microsoft.HybridNetwork/Allow-Publisher"}' - headers: - cache-control: - - no-cache - content-length: - - '288' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:11 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding,Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: 
null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: DELETE - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu/networkServiceDesignVersions/1.0.0?api-version=2023-09-01 - response: - body: - string: 'null' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '4' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:21 GMT - etag: - - '"6b002c53-0000-3200-0000-652feb860000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-deletes: - - '14999' - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "name": "8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu/networkServiceDesignVersions/1.0.0", - "status": "Deleting", "startTime": "2023-10-18T14:28:18.2913406Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '617' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:21 GMT - etag: - - '"00005a15-0000-3200-0000-652feb820000"' - expires: - - '-1' - location: - - 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "name": "8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu/networkServiceDesignVersions/1.0.0", - "status": "Succeeded", "startTime": "2023-10-18T14:28:18.2913406Z", "endTime": - "2023-10-18T14:28:34.6929438Z", "properties": null}' - headers: - cache-control: - - no-cache - content-length: - - '681' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:51 GMT - etag: - - '"00005b15-0000-3200-0000-652feb920000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "name": "8d779970-9490-4a00-831f-213151746155*E88B263ED337FF76C2D622550441DD41C00F955FD1DD7EA56D77D1FB17FF2C31", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/networkServiceDesignGroups/ubuntu/networkServiceDesignVersions/1.0.0", - "status": "Succeeded", "startTime": "2023-10-18T14:28:18.2913406Z", "endTime": - "2023-10-18T14:28:34.6929438Z", "properties": null}' - headers: - cache-control: - - 
no-cache - content-length: - - '681' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:51 GMT - etag: - - '"00005b15-0000-3200-0000-652feb920000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: DELETE - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0?api-version=2023-09-01 - response: - body: - string: 'null' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/fdcac423-e8de-4316-91e3-b330020c8439*04D828A0BC42F9FD0D331CDBE7E05FBB6E6E4EE184AB19A69FCD9813BBC707D8?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '4' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:28:57 GMT - etag: - - '"0400e29a-0000-3200-0000-652febaa0000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/fdcac423-e8de-4316-91e3-b330020c8439*04D828A0BC42F9FD0D331CDBE7E05FBB6E6E4EE184AB19A69FCD9813BBC707D8?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-deletes: - - '14998' - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nsd delete - Connection: - - keep-alive - ParameterSetName: - - -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/fdcac423-e8de-4316-91e3-b330020c8439*04D828A0BC42F9FD0D331CDBE7E05FBB6E6E4EE184AB19A69FCD9813BBC707D8?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/fdcac423-e8de-4316-91e3-b330020c8439*04D828A0BC42F9FD0D331CDBE7E05FBB6E6E4EE184AB19A69FCD9813BBC707D8", - "name": "fdcac423-e8de-4316-91e3-b330020c8439*04D828A0BC42F9FD0D331CDBE7E05FBB6E6E4EE184AB19A69FCD9813BBC707D8", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr/artifactManifests/ubuntu-vm-nfdg-nf-acr-manifest-1-0-0", - "status": "Deleting", "startTime": "2023-10-18T14:28:55.2143789Z"}' - headers: - azure-asyncoperation: - - 
[deleted test recording, condensed: the remainder of the re-recorded VCR cassette for the
vnf/nsd test run. `aosm nsd delete -f --clean --force` polls the artifact-manifest delete
(ubuntu-vm-nfdg-nf-acr-manifest-1-0-0) to "Succeeded", then deletes and polls
configurationGroupSchemas/ubuntu_ConfigGroupSchema and networkServiceDesignGroups/ubuntu.
`aosm nfd delete --definition-type -f --clean --force` then checks the
Microsoft.HybridNetwork/Allow-2023-09-01 and Allow-Publisher features (both "Registered")
and deletes, in order: networkFunctionDefinitionVersions/1.0.0 (under ubuntu-vm-nfdg),
artifactManifests/ubuntu-vm-sa-manifest-1-0-0 (ubuntu-blob-store),
artifactManifests/ubuntu-vm-acr-manifest-1-0-0 (ubuntu-acr),
networkFunctionDefinitionGroups/ubuntu-vm-nfdg, and artifactStores/ubuntu-acr, all under
publisher automated-tests-ubuntuPublisher in resource group cli_test_vnf_nsd_000001
(UAENORTH), recorded Wed, 18 Oct 2023 with AZURECLI/2.53.0 on Python/3.8.10. Every DELETE
returns 202 Accepted with an Azure-AsyncOperation URL, which is polled roughly every 30
seconds until "status" moves from "Deleting" to "Succeeded"; the excerpt is truncated
while artifactStores/ubuntu-acr is still reported as "Deleting".]
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "name": "2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "status": "Succeeded", "startTime": "2023-10-18T14:35:10.5758639Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '595' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:37:38 GMT - etag: - - '"00007815-0000-3200-0000-652feda80000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "name": "2a40aefb-1667-479c-892e-5aba8ec8598a*C489A37845DCE4DE35485E84536556E7E5E2C311C656CB69846034646BD1F74D", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-acr", - "status": "Succeeded", "startTime": "2023-10-18T14:35:10.5758639Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '595' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:37:39 GMT - etag: - - 
'"00007815-0000-3200-0000-652feda80000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: DELETE - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store?api-version=2023-09-01 - response: - body: - string: 'null' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '4' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:37:43 GMT - etag: - - '"0000daf3-0000-3200-0000-652fedb80000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-deletes: - - '14994' - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:37:43 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:38:13 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:38:43 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:39:13 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - 
https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:39:42 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:40:12 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Deleting", "startTime": "2023-10-18T14:37:43.7283155Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '581' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:40:41 GMT - etag: - - '"00007915-0000-3200-0000-652fedb70000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f 
--clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Succeeded", "startTime": "2023-10-18T14:37:43.7283155Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '602' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:41:12 GMT - etag: - - '"00007c15-0000-3200-0000-652fee7e0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "name": "271d638b-e5f2-42c8-846f-13742271f402*B741486A4E495F3D1985CCB3A2E57162B9345100065F16E9106C8F7672182F46", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher/artifactStores/ubuntu-blob-store", - "status": "Succeeded", "startTime": "2023-10-18T14:37:43.7283155Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '602' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:41:12 GMT - etag: - - '"00007c15-0000-3200-0000-652fee7e0000"' - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - transfer-encoding: - - chunked - vary: - - Accept-Encoding - x-content-type-options: - - nosniff - status: - code: 200 - message: OK -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 
azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: DELETE - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: '{"error": {"code": "GatewayTimeout", "message": "Server failed to process - the request. Tracking Id: ''d7416ab1-2907-4fd2-82f0-89ce8e1fa770''."}}' - headers: - cache-control: - - no-cache - connection: - - close - content-length: - - '142' - content-type: - - application/json - date: - - Wed, 18 Oct 2023 14:42:13 GMT - expires: - - '-1' - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-failure-cause: - - service - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-deletes: - - '14993' - status: - code: 504 - message: Gateway Timeout -- request: - body: null - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - Content-Length: - - '0' - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: DELETE - uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher?api-version=2023-09-01 - response: - body: - string: 'null' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '4' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:42:41 GMT - etag: - - '"07005e51-0000-3200-0000-652feee20000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - x-ms-build-version: - - 1.0.02477.1998 - x-ms-providerhub-traffic: - - 'True' - x-ms-ratelimit-remaining-subscription-deletes: - - '14999' - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": 
"/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Deleting", "startTime": "2023-10-18T14:42:41.1606344Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:42:42 GMT - etag: - - '"00007e15-0000-3200-0000-652feee10000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Deleting", "startTime": "2023-10-18T14:42:41.1606344Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:43:11 GMT - etag: - - '"00007e15-0000-3200-0000-652feee10000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - 
status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Deleting", "startTime": "2023-10-18T14:42:41.1606344Z"}' - headers: - azure-asyncoperation: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - cache-control: - - no-cache - content-length: - - '548' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:43:41 GMT - etag: - - '"00007e15-0000-3200-0000-652feee10000"' - expires: - - '-1' - location: - - https://management.azure.com/providers/Microsoft.HybridNetwork/locations/uaenorth/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - pragma: - - no-cache - strict-transport-security: - - max-age=31536000; includeSubDomains - x-content-type-options: - - nosniff - status: - code: 202 - message: Accepted -- request: - body: null - headers: - Accept: - - '*/*' - Accept-Encoding: - - gzip, deflate - CommandName: - - aosm nfd delete - Connection: - - keep-alive - ParameterSetName: - - --definition-type -f --clean --force - User-Agent: - - AZURECLI/2.53.0 azsdk-python-hybridnetwork/unknown Python/3.8.10 (Linux-5.10.102.1-microsoft-standard-WSL2-x86_64-with-glibc2.29) - method: GET - uri: https://management.azure.com/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418?api-version=2020-01-01-preview - response: - body: - string: '{"id": "/providers/Microsoft.HybridNetwork/locations/UAENORTH/operationStatuses/cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "name": "cb49c45f-3ebf-45d3-a099-bf641f97078d*CAC1063A94B052A2C0FBBA346CD54C0887C75C9C9CA89E8D0AC1EA95BA50F418", - "resourceId": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/cli_test_vnf_nsd_000001/providers/Microsoft.HybridNetwork/publishers/automated-tests-ubuntuPublisher", - "status": "Succeeded", "startTime": "2023-10-18T14:42:41.1606344Z", "properties": - null}' - headers: - cache-control: - - no-cache - content-length: - - '569' - content-type: - - application/json; charset=utf-8 - date: - - Wed, 18 Oct 2023 14:44:11 
-version: 1
diff --git a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_input_template.json b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_input_template.json
index 1a350599c29..f3ec1f24aa3 100644
--- a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_input_template.json
+++ b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_input_template.json
@@ -1,5 +1,5 @@
 {
-    "publisher_name": "automated-tests-nginx-publisher",
+    "publisher_name": "automated-cli-tests-nginx-publisher",
     "publisher_resource_group_name": "{{publisher_resource_group_name}}",
     "nf_name": "nginx",
     "version": "1.0.0",
diff --git a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_nsd_input_template.json b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_nsd_input_template.json
index dacd8c29e4c..81e42a38501 100644
--- a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_nsd_input_template.json
+++ b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/cnf_nsd_input_template.json
@@ -1,6 +1,6 @@
 {
     "location": "uaenorth",
-    "publisher_name": "automated-tests-nginx-publisher",
+    "publisher_name": "automated-cli-tests-nginx-publisher",
     "publisher_resource_group_name": "{{publisher_resource_group_name}}",
     "acr_artifact_store_name": "nginx-acr",
     "network_functions": [
@@ -10,7 +10,7 @@
             "publisher_offering_location": "uaenorth",
             "type": "cnf",
             "multiple_instances": false,
-            "publisher": "automated-tests-nginx-publisher",
+            "publisher": "automated-cli-tests-nginx-publisher",
             "publisher_resource_group": "{{publisher_resource_group_name}}"
         }
     ],
diff --git a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_input_template.json b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_input_template.json
index f0e4fdf6a3d..f4ded903cb3 100644
--- a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_input_template.json
+++ b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_input_template.json
@@ -1,5 +1,5 @@
 {
-    "publisher_name": "automated-tests-ubuntuPublisher",
+    "publisher_name": "automated-cli-tests-ubuntu-publisher",
     "publisher_resource_group_name": "{{publisher_resource_group_name}}",
     "acr_artifact_store_name": "ubuntu-acr",
     "location": "uaenorth",
diff --git a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_nsd_input_template.json b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_nsd_input_template.json
index 320d0f1339b..0545d8595d6 100644
--- a/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_nsd_input_template.json
+++ b/src/aosm/azext_aosm/tests/latest/scenario_test_mocks/mock_input_templates/vnf_nsd_input_template.json
@@ -1,6 +1,6 @@
 {
     "location": "uaenorth",
-    "publisher_name": "automated-tests-ubuntuPublisher",
+    "publisher_name": "automated-cli-tests-ubuntu-publisher",
     "publisher_resource_group_name": "{{publisher_resource_group_name}}",
     "acr_artifact_store_name": "ubuntu-acr",
     "network_functions": [
@@ -10,7 +10,7 @@
             "publisher_offering_location": "uaenorth",
             "type": "vnf",
             "multiple_instances": false,
-            "publisher": "automated-tests-ubuntuPublisher",
+            "publisher": "automated-cli-tests-ubuntu-publisher",
             "publisher_resource_group": "{{publisher_resource_group_name}}"
         }
     ],
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/__init__.py
new file mode 100644
index 00000000000..77845c19798
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/__init__.py
@@ -0,0 +1,6 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/__init__.py
new file mode 100644
index 00000000000..849489fca33
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/__init__.py
@@ -0,0 +1 @@
+__import__('pkg_resources').declare_namespace(__name__)
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/__init__.py
new file mode 100644
index 00000000000..15f20338c80
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/__init__.py
@@ -0,0 +1,243 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+import os
+
+from typing import Union, Iterable, AnyStr, IO, Any, Dict  # pylint: disable=unused-import
+from ._version import VERSION
+from ._blob_client import BlobClient
+from ._container_client import ContainerClient
+from ._blob_service_client import BlobServiceClient
+from ._lease import BlobLeaseClient
+from ._download import StorageStreamDownloader
+from ._quick_query_helper import BlobQueryReader
+from ._shared_access_signature import generate_account_sas, generate_container_sas, generate_blob_sas
+from ._shared.policies import ExponentialRetry, LinearRetry
+from ._shared.response_handlers import PartialBatchErrorException
+from ._shared.models import(
+    LocationMode,
+    ResourceTypes,
+    AccountSasPermissions,
+    StorageErrorCode,
+    UserDelegationKey
+)
+from ._generated.models import (
+    RehydratePriority,
+)
+from ._models import (
+    BlobType,
+    BlockState,
+    StandardBlobTier,
+    PremiumPageBlobTier,
+    BlobImmutabilityPolicyMode,
+    SequenceNumberAction,
+    PublicAccess,
+    BlobAnalyticsLogging,
+    Metrics,
+    RetentionPolicy,
+    StaticWebsite,
+    CorsRule,
+    ContainerProperties,
+    BlobProperties,
+    FilteredBlob,
+    LeaseProperties,
+    ContentSettings,
+    CopyProperties,
+    BlobBlock,
+    PageRange,
+    AccessPolicy,
+    ContainerSasPermissions,
+    BlobSasPermissions,
+    CustomerProvidedEncryptionKey,
+    ContainerEncryptionScope,
+    BlobQueryError,
+    DelimitedJsonDialect,
+    DelimitedTextDialect,
+    QuickQueryDialect,
+    ArrowDialect,
+    ArrowType,
+    ObjectReplicationPolicy,
+    ObjectReplicationRule,
+    ImmutabilityPolicy
+)
+from ._list_blobs_helper import BlobPrefix
+
+__version__ = VERSION
+
+
+def upload_blob_to_url(
+        blob_url,  # type: str
+        data,  # type: Union[Iterable[AnyStr], IO[AnyStr]]
+        credential=None,  # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long
+        **kwargs):
+    # type: (...) -> Dict[str, Any]
+    """Upload data to a given URL
+
+    The data will be uploaded as a block blob.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict(str, Any)
+    """
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs)
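A hedged usage sketch for the upload_blob_to_url helper above; the SAS URL and local file name are placeholders, and overwrite=True is shown because, per the docstring, re-uploading without it raises ResourceExistsError:

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import upload_blob_to_url

sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"
with open("image.vhd", "rb") as data:
    # Uploaded as a block blob; returns the blob-updated property dict.
    props = upload_blob_to_url(sas_url, data, overwrite=True)
print(props)  # e.g. etag and last-modified of the new blob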
+
+
+def _download_to_stream(client, handle, **kwargs):
+    """Download data to specified open file-handle."""
+    stream = client.download_blob(**kwargs)
+    stream.readinto(handle)
+
+
+def download_blob_from_url(
+        blob_url,  # type: str
+        output,  # type: str
+        credential=None,  # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long
+        **kwargs):
+    # type: (...) -> None
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword bool overwrite:
+        Whether the local file should be overwritten if it already exists. The default value is
+        `False` - in which case a ValueError will be raised if the file already exists. If set to
+        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+        in, this value is ignored.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int offset:
+        Start of byte range to use for downloading a section of the blob.
+        Must be set if length is provided.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :rtype: None
+    """
+    overwrite = kwargs.pop('overwrite', False)
+    with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        if hasattr(output, 'write'):
+            _download_to_stream(client, output, **kwargs)
+        else:
+            if not overwrite and os.path.isfile(output):
+                raise ValueError("The file '{}' already exists.".format(output))
+            with open(output, 'wb') as file_handle:
+                _download_to_stream(client, file_handle, **kwargs)
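A matching sketch for download_blob_from_url above, exercising both output modes the function body distinguishes; the URL and paths are placeholders:

import io
from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import download_blob_from_url

sas_url = "https://<account>.blob.core.windows.net/<container>/<blob>?<sas-token>"

# To a file path: raises ValueError if the file already exists, unless overwrite=True.
download_blob_from_url(sas_url, "image.vhd", overwrite=True)

# To any object with a write method: the overwrite flag is ignored for streams.
buffer = io.BytesIO()
download_blob_from_url(sas_url, buffer)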
+
+
+__all__ = [
+    'upload_blob_to_url',
+    'download_blob_from_url',
+    'BlobServiceClient',
+    'ContainerClient',
+    'BlobClient',
+    'BlobType',
+    'BlobLeaseClient',
+    'StorageErrorCode',
+    'UserDelegationKey',
+    'ExponentialRetry',
+    'LinearRetry',
+    'LocationMode',
+    'BlockState',
+    'StandardBlobTier',
+    'PremiumPageBlobTier',
+    'SequenceNumberAction',
+    'BlobImmutabilityPolicyMode',
+    'ImmutabilityPolicy',
+    'PublicAccess',
+    'BlobAnalyticsLogging',
+    'Metrics',
+    'RetentionPolicy',
+    'StaticWebsite',
+    'CorsRule',
+    'ContainerProperties',
+    'BlobProperties',
+    'BlobPrefix',
+    'FilteredBlob',
+    'LeaseProperties',
+    'ContentSettings',
+    'CopyProperties',
+    'BlobBlock',
+    'PageRange',
+    'AccessPolicy',
+    'QuickQueryDialect',
+    'ContainerSasPermissions',
+    'BlobSasPermissions',
+    'ResourceTypes',
+    'AccountSasPermissions',
+    'StorageStreamDownloader',
+    'CustomerProvidedEncryptionKey',
+    'RehydratePriority',
+    'generate_account_sas',
+    'generate_container_sas',
+    'generate_blob_sas',
+    'PartialBatchErrorException',
+    'ContainerEncryptionScope',
+    'BlobQueryError',
+    'DelimitedJsonDialect',
+    'DelimitedTextDialect',
+    'ArrowDialect',
+    'ArrowType',
+    'BlobQueryReader',
+    'ObjectReplicationPolicy',
+    'ObjectReplicationRule'
+]
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_client.py
new file mode 100644
index 00000000000..c0a8ae46c5f
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_client.py
@@ -0,0 +1,4314 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines,no-self-use + +from functools import partial +from io import BytesIO +from typing import ( + Any, AnyStr, AsyncIterable, Dict, IO, Iterable, List, Optional, overload, Tuple, Union, + TYPE_CHECKING +) +from urllib.parse import urlparse, quote, unquote +import warnings + +from typing_extensions import Self + +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace +from ._shared import encode_base64 +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query, TransportWrapper +from ._shared.uploads import IterStreamer +from ._shared.uploads_async import AsyncIterStreamer +from ._shared.request_handlers import ( + add_metadata_headers, get_length, read_length, + validate_and_format_range_headers) +from ._shared.response_handlers import return_response_headers, process_storage_error, return_headers_and_deserialized +from ._generated import AzureBlobStorage +from ._generated.models import ( + DeleteSnapshotsOptionType, + BlobHTTPHeaders, + BlockLookupList, + AppendPositionAccessConditions, + SequenceNumberAccessConditions, + QueryRequest, + CpkInfo) +from ._serialize import ( + get_modify_conditions, + get_source_conditions, + get_cpk_scope_info, + get_api_version, + serialize_blob_tags_header, + serialize_blob_tags, + serialize_query_format, get_access_conditions +) +from ._deserialize import ( + get_page_ranges_result, + deserialize_blob_properties, + deserialize_blob_stream, + parse_tags, + deserialize_pipeline_response_into_cls +) +from ._download import StorageStreamDownloader +from ._encryption import StorageEncryptionMixin +from ._lease import BlobLeaseClient +from ._models import BlobType, BlobBlock, BlobProperties, BlobQueryError, QuickQueryDialect, \ + DelimitedJsonDialect, DelimitedTextDialect, PageRangePaged, PageRange +from ._quick_query_helper import BlobQueryReader +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob, + _any_conditions +) + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from ._generated.models import BlockList + from ._models import ( + ContentSettings, + ImmutabilityPolicy, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + +_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION = ( + 'The require_encryption flag is set, but encryption is not supported' + ' for this method.') + + +class BlobClient(StorageAccountHostsMixin, StorageEncryptionMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. 
This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. 
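To make the constructor parameters above concrete, here is a minimal construction sketch; the account, container, blob, and credential values are placeholders, and DefaultAzureCredential assumes the azure-identity package is installed:

from azure.identity import DefaultAzureCredential
from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import BlobClient

# Explicit account URL plus container/blob names, authenticating with AAD.
blob_client = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",
    container_name="mycontainer",
    blob_name="myblob.txt",
    credential=DefaultAzureCredential(),
)

# Or build the client from a full blob URL (for example one carrying a SAS
# token), in which case no separate credential is needed.
blob_client = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/myblob.txt?<sas-token>")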
+ """ + def __init__( + self, account_url: str, + container_name: str, + blob_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + + if not (container_name and blob_name): + raise ValueError("Please specify a container name and blob name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + path_snapshot, sas_token = parse_query(parsed_url.query) + + self.container_name = container_name + self.blob_name = blob_name + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + # This parameter is used for the hierarchy traversal. Give precedence to credential. + self._raw_credential = credential if credential else sas_token + self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot) + super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._configure_encryption(kwargs) + + def _format_url(self, hostname): + container_name = self.container_name + if isinstance(container_name, str): + container_name = container_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(container_name), + quote(self.blob_name, safe='~/'), + self._query_str) + + def _encode_source_url(self, source_url): + parsed_source_url = urlparse(source_url) + source_scheme = parsed_source_url.scheme + source_hostname = parsed_source_url.netloc.rstrip('/') + source_path = unquote(parsed_source_url.path) + source_query = parsed_source_url.query + result = ["{}://{}{}".format(source_scheme, source_hostname, quote(source_path, safe='~/'))] + if source_query: + result.append(source_query) + return '?'.join(result) + + @classmethod + def from_blob_url( + cls, blob_url: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + **kwargs: Any + ) -> Self: + """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name. + + :param str blob_url: + The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be + either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. + :type blob_url: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. 
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. If specified, this will override + the snapshot in the url. + :returns: A Blob client. + :rtype: ~azure.storage.blob.BlobClient + """ + try: + if not blob_url.lower().startswith('http'): + blob_url = "https://" + blob_url + except AttributeError: + raise ValueError("Blob URL must be a string.") + parsed_url = urlparse(blob_url.rstrip('/')) + + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(blob_url)) + + account_path = "" + if ".core." in parsed_url.netloc: + # .core. is indicating non-customized url. Blob name with directory info can also be parsed. + path_blob = parsed_url.path.lstrip('/').split('/', maxsplit=1) + elif "localhost" in parsed_url.netloc or "127.0.0.1" in parsed_url.netloc: + path_blob = parsed_url.path.lstrip('/').split('/', maxsplit=2) + account_path += '/' + path_blob[0] + else: + # for customized url. blob name that has directory info cannot be parsed. + path_blob = parsed_url.path.lstrip('/').split('/') + if len(path_blob) > 2: + account_path = "/" + "/".join(path_blob[:-2]) + + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + + msg_invalid_url = "Invalid URL. Provide a blob_url with a valid blob and container name." + if len(path_blob) <= 1: + raise ValueError(msg_invalid_url) + container_name, blob_name = unquote(path_blob[-2]), unquote(path_blob[-1]) + if not container_name or not blob_name: + raise ValueError(msg_invalid_url) + + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + return cls( + account_url, container_name=container_name, blob_name=blob_name, + snapshot=path_snapshot, credential=credential, **kwargs + ) + + @classmethod + def from_connection_string( + cls, conn_str: str, + container_name: str, + blob_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create BlobClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. 
The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A Blob client. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START auth_from_connection_string_blob] + :end-before: [END auth_from_connection_string_blob] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, container_name=container_name, blob_name=blob_name, + snapshot=snapshot, credential=credential, **kwargs + ) + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_blob_options( # pylint:disable=too-many-statements + self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]], + blob_type: Union[str, BlobType] = BlobType.BlockBlob, + length: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs + ) -> Dict[str, Any]: + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + encryption_options = { + 'required': self.require_encryption, + 'version': self.encryption_version, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function, + } + + encoding = kwargs.pop('encoding', 'UTF-8') + if isinstance(data, str): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__') and not isinstance(data, (list, tuple, set, dict)): + stream = IterStreamer(data, encoding=encoding) + elif hasattr(data, '__aiter__'): + stream = AsyncIterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + + validate_content = kwargs.pop('validate_content', False) + content_settings = kwargs.pop('content_settings', None) + overwrite = kwargs.pop('overwrite', False) + max_concurrency = kwargs.pop('max_concurrency', 1) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + kwargs['cpk_info'] = cpk_info 
+ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + kwargs['lease_access_conditions'] = get_access_conditions(kwargs.pop('lease', None)) + kwargs['modified_access_conditions'] = get_modify_conditions(kwargs) + kwargs['cpk_scope_info'] = get_cpk_scope_info(kwargs) + if content_settings: + kwargs['blob_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + kwargs['blob_tags_string'] = serialize_blob_tags_header(kwargs.pop('tags', None)) + kwargs['stream'] = stream + kwargs['length'] = length + kwargs['overwrite'] = overwrite + kwargs['headers'] = headers + kwargs['validate_content'] = validate_content + kwargs['blob_settings'] = self._config + kwargs['max_concurrency'] = max_concurrency + kwargs['encryption_options'] = encryption_options + + if blob_type == BlobType.BlockBlob: + kwargs['client'] = self._client.block_blob + kwargs['data'] = data + elif blob_type == BlobType.PageBlob: + if self.encryption_version == '2.0' and (self.require_encryption or self.key_encryption_key is not None): + raise ValueError("Encryption version 2.0 does not currently support page blobs.") + kwargs['client'] = self._client.page_blob + elif blob_type == BlobType.AppendBlob: + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + kwargs['client'] = self._client.append_blob + else: + raise ValueError("Unsupported BlobType: {}".format(blob_type)) + return kwargs + + def _upload_blob_from_url_options(self, source_url, **kwargs): + # type: (...) 
-> Dict[str, Any] + tier = kwargs.pop('standard_blob_tier', None) + overwrite = kwargs.pop('overwrite', False) + content_settings = kwargs.pop('content_settings', None) + source_authorization = kwargs.pop('source_authorization', None) + if content_settings: + kwargs['blob_http_headers'] = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=None, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'copy_source_authorization': source_authorization, + 'content_length': 0, + 'copy_source_blob_properties': kwargs.pop('include_source_blob_properties', True), + 'source_content_md5': kwargs.pop('source_content_md5', None), + 'copy_source': source_url, + 'modified_access_conditions': get_modify_conditions(kwargs), + 'blob_tags_string': serialize_blob_tags_header(kwargs.pop('tags', None)), + 'cls': return_response_headers, + 'lease_access_conditions': get_access_conditions(kwargs.pop('destination_lease', None)), + 'tier': tier.value if tier else None, + 'source_modified_access_conditions': get_source_conditions(kwargs), + 'cpk_info': cpk_info, + 'cpk_scope_info': get_cpk_scope_info(kwargs) + } + options.update(kwargs) + if not overwrite and not _any_conditions(**options): # pylint: disable=protected-access + options['modified_access_conditions'].if_none_match = '*' + return options + + @distributed_trace + def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. 
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_blob( + self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]], + blob_type: Union[str, BlobType] = BlobType.BlockBlob, + length: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs + ) -> Dict[str, Any]: + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. 
This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword progress_hook: + A callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transferred + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], None] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 12 + :caption: Upload a blob to the container.
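In addition to the sample referenced above, a short sketch of the two most common upload paths; blob_client is a BlobClient as constructed earlier, and the data and path are placeholders:

# One-shot block blob upload; without overwrite=True a second call would
# fail with ResourceExistsError.
blob_client.upload_blob(b"hello world", overwrite=True)

# Streaming upload with per-chunk MD5 validation and two parallel connections.
with open("payload.bin", "rb") as stream:
    blob_client.upload_blob(stream, overwrite=True, validate_content=True, max_concurrency=2)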
+ """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return upload_page_blob(**options) + return upload_append_blob(**options) + + def _download_blob_options(self, offset=None, length=None, encoding=None, **kwargs): + # type: (Optional[int], Optional[int], Optional[str], **Any) -> Dict[str, Any] + if self.require_encryption and not self.key_encryption_key: + raise ValueError("Encryption required but no key was provided.") + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Service actually uses an end-range inclusive index + + validate_content = kwargs.pop('validate_content', False) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'clients': self._client, + 'config': self._config, + 'start_range': offset, + 'end_range': length, + 'version_id': kwargs.pop('version_id', None), + 'validate_content': validate_content, + 'encryption_options': { + 'required': self.require_encryption, + 'key': self.key_encryption_key, + 'resolver': self.key_resolver_function}, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'download_cls': kwargs.pop('cls', None) or deserialize_blob_stream, + 'max_concurrency':kwargs.pop('max_concurrency', 1), + 'encoding': encoding, + 'timeout': kwargs.pop('timeout', None), + 'name': self.blob_name, + 'container': self.container_name} + options.update(kwargs) + return options + + @overload + def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: str, + **kwargs) -> StorageStreamDownloader[str]: + ... + + @overload + def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: None = None, + **kwargs) -> StorageStreamDownloader[bytes]: + ... + + @distributed_trace + def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: Optional[str] = None, + **kwargs) -> StorageStreamDownloader: + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. 
The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword progress_hook: + A callback to track the progress of a long running download. The signature is + function(current: int, total: int) where current is the number of bytes transferred + so far, and total is the total size of the download. + :paramtype progress_hook: Callable[[int, int], None] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + + .. admonition:: Example: + + ..
literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] + :language: python + :dedent: 12 + :caption: Download a blob. + """ + options = self._download_blob_options( + offset=offset, + length=length, + encoding=encoding, + **kwargs) + return StorageStreamDownloader(**options) + + def _quick_query_options(self, query_expression, + **kwargs): + # type: (str, **Any) -> Dict[str, Any] + delimiter = '\n' + input_format = kwargs.pop('blob_format', None) + if input_format == QuickQueryDialect.DelimitedJson: + input_format = DelimitedJsonDialect() + if input_format == QuickQueryDialect.DelimitedText: + input_format = DelimitedTextDialect() + input_parquet_format = input_format == "ParquetDialect" + if input_format and not input_parquet_format: + try: + delimiter = input_format.lineterminator + except AttributeError: + try: + delimiter = input_format.delimiter + except AttributeError: + raise ValueError("The Type of blob_format can only be DelimitedTextDialect or " + "DelimitedJsonDialect or ParquetDialect") + output_format = kwargs.pop('output_format', None) + if output_format == QuickQueryDialect.DelimitedJson: + output_format = DelimitedJsonDialect() + if output_format == QuickQueryDialect.DelimitedText: + output_format = DelimitedTextDialect() + if output_format: + if output_format == "ParquetDialect": + raise ValueError("ParquetDialect is invalid as an output format.") + try: + delimiter = output_format.lineterminator + except AttributeError: + try: + delimiter = output_format.delimiter + except AttributeError: + pass + else: + output_format = input_format if not input_parquet_format else None + query_request = QueryRequest( + expression=query_expression, + input_serialization=serialize_query_format(input_format), + output_serialization=serialize_query_format(output_format) + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo( + encryption_key=cpk.key_value, + encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm + ) + options = { + 'query_request': query_request, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'snapshot': self.snapshot, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_headers_and_deserialized, + } + options.update(kwargs) + return options, delimiter + + @distributed_trace + def query_blob(self, query_expression, **kwargs): + # type: (str, **Any) -> BlobQueryReader + """Enables users to select/project on blob or blob snapshot data by providing simple query expressions. + This operation returns a BlobQueryReader; users need to use readall() or readinto() to get query data. + + :param str query_expression: + Required. A query statement. + :keyword Callable[~azure.storage.blob.BlobQueryError] on_error: + A function to be called on any processing errors returned by the service. + :keyword blob_format: + Optional. Defines the serialization of the data currently stored in the blob. The default is to + treat the blob data as CSV data formatted in the default dialect. This can be overridden with + a custom DelimitedTextDialect, or DelimitedJsonDialect or "ParquetDialect" (passed as a string or enum).
+ These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string + :paramtype blob_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect + or ~azure.storage.blob.QuickQueryDialect or str + :keyword output_format: + Optional. Defines the output serialization for the data stream. By default the data will be returned + as it is represented in the blob (Parquet formats default to DelimitedTextDialect). + By providing an output format, the blob data will be reformatted according to that profile. + This value can be a DelimitedTextDialect or a DelimitedJsonDialect or ArrowDialect. + These dialects can be passed through their respective classes, the QuickQueryDialect enum or as a string + :paramtype output_format: ~azure.storage.blob.DelimitedTextDialect or ~azure.storage.blob.DelimitedJsonDialect + or list[~azure.storage.blob.ArrowDialect] or ~azure.storage.blob.QuickQueryDialect or str + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: A streaming object (BlobQueryReader) + :rtype: ~azure.storage.blob.BlobQueryReader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_query.py + :start-after: [START query] + :end-before: [END query] + :language: python + :dedent: 4 + :caption: select/project on blob or blob snapshot data by providing simple query expressions.
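A minimal sketch of a quick query against CSV-formatted blob data, per the docstring above; blob_client, the query expression, and the dialect settings are illustrative:

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import DelimitedTextDialect

def on_error(error):
    # Called for any processing errors reported by the service.
    print("query error:", error)

reader = blob_client.query_blob(
    "SELECT * from BlobStorage",
    blob_format=DelimitedTextDialect(delimiter=",", has_header=True),
    on_error=on_error,
)
print(reader.readall())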
+ """ + errors = kwargs.pop("on_error", None) + error_cls = kwargs.pop("error_cls", BlobQueryError) + encoding = kwargs.pop("encoding", None) + options, delimiter = self._quick_query_options(query_expression, **kwargs) + try: + headers, raw_response_body = self._client.blob.query(**options) + except HttpResponseError as error: + process_storage_error(error) + return BlobQueryReader( + name=self.blob_name, + container=self.container_name, + errors=errors, + record_delimiter=delimiter, + encoding=encoding, + headers=headers, + response=raw_response_body, + error_cls=error_cls) + + @staticmethod + def _generic_delete_blob_options(delete_snapshots=None, **kwargs): + # type: (str, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if delete_snapshots: + delete_snapshots = DeleteSnapshotsOptionType(delete_snapshots) + options = { + 'timeout': kwargs.pop('timeout', None), + 'snapshot': kwargs.pop('snapshot', None), # this is added for delete_blobs + 'delete_snapshots': delete_snapshots or None, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions} + options.update(kwargs) + return options + + def _delete_blob_options(self, delete_snapshots=None, **kwargs): + # type: (str, **Any) -> Dict[str, Any] + if self.snapshot and delete_snapshots: + raise ValueError("The delete_snapshots option cannot be used with a specific snapshot.") + options = self._generic_delete_blob_options(delete_snapshots, **kwargs) + options['snapshot'] = self.snapshot + options['version_id'] = kwargs.pop('version_id', None) + options['blob_delete_type'] = kwargs.pop('blob_delete_type', None) + return options + + @distributed_trace + def delete_blob(self, delete_snapshots=None, **kwargs): + # type: (str, **Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + Soft deleted blob is accessible through :func:`~ContainerClient.list_blobs()` specifying `include=['deleted']` + option. Soft-deleted blob can be restored using :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 12 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + self._client.blob.delete(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def undelete_blob(self, **kwargs): + # type: (**Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + If blob versioning is enabled, the base blob cannot be restored using this + method. Instead use :func:`start_copy_from_url` with the URL of the blob version + you wish to promote to the current version. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 8 + :caption: Undeleting a blob. + """ + try: + self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace() + def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check if it exists. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds.
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: boolean + """ + try: + self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + # Encrypted with CPK + except ResourceExistsError: + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace + def get_blob_properties(self, **kwargs): + # type: (**Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 8 + :caption: Getting the properties for a blob.
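A brief sketch combining exists() with get_blob_properties(), as documented above; blob_client and the printed fields are illustrative:

# Probe for the blob first, then fetch its properties (not its content).
if blob_client.exists():
    props = blob_client.get_blob_properties()
    print(props.name, props.size, props.last_modified)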
+ """ + # TODO: extract this out as _get_blob_properties_options + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + def _set_http_headers_options(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + options = { + 'timeout': kwargs.pop('timeout', None), + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], **Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_metadata_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Union[str, datetime]] + """ + options = self._set_blob_metadata_options(metadata=metadata, **kwargs) + try: + return self._client.blob.set_metadata(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_immutability_policy(self, immutability_policy, **kwargs): + # type: (ImmutabilityPolicy, **Any) -> Dict[str, str] + """The Set Immutability Policy operation sets the immutability policy on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'. + + :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Etag, last modified, and immutability policy settings). + :rtype: Dict[str, str] + """ + + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + return self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) + + @distributed_trace + def delete_immutability_policy(self, **kwargs): + # type: (**Any) -> None + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'.
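Since each set_blob_metadata() call replaces the blob's entire metadata set, adding one key takes a read-modify-write round trip. A sketch, reusing the hypothetical `blob` client from the earlier example:

    existing = blob.get_blob_properties().metadata or {}
    existing["processed"] = "true"              # hypothetical key to add
    blob.set_blob_metadata(metadata=existing)   # replaces all existing metadata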
+ + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + """ + + self._client.blob.delete_immutability_policy(**kwargs) + + @distributed_trace + def set_legal_hold(self, legal_hold, **kwargs): + # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] + """The Set Legal Hold operation sets a legal hold on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'. + + :param bool legal_hold: + Specifies whether a legal hold should be set on the blob. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Etag, last modified, and legal hold). + :rtype: Dict[str, Union[str, datetime, bool]] + """ + + return self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) + + def _create_page_blob_options( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + sequence_number = kwargs.pop('sequence_number', None) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + tier = None + if premium_page_blob_tier: + try: + tier = premium_page_blob_tier.value # type: ignore + except AttributeError: + tier = premium_page_blob_tier # type: ignore + + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_content_length': size, + 'blob_sequence_number': sequence_number, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions,
+ 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + "tier": tier, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_page_blob( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new Page Blob of the specified size. + + :param int size: + This specifies the maximum size for the page blob, up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword int sequence_number: + Only for Page blobs. The sequence number is a user-controlled value that you can use to + track requests. The value of the sequence number must be between 0 + and 2^63 - 1.The default value is 0. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return self._client.page_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_append_blob_options(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + blob_headers = None + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'content_length': 0, + 'blob_http_headers': blob_headers, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': 
cpk_info, + 'blob_tags_string': blob_tags_string, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. 
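For the create_page_blob() documentation above, a minimal sketch with the same hypothetical `blob` client; the size must be a multiple of 512 bytes and the tag values are illustrative:

    blob.create_page_blob(
        size=1024 * 1024,      # 1 MiB, aligned to a 512-byte boundary
        sequence_number=0,     # user-controlled; between 0 and 2**63 - 1
        tags={"env": "test"})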
+ + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _create_snapshot_options(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'headers': headers} + options.update(kwargs) + return options + + @distributed_trace + def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
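A sketch of the append-blob flow for create_append_blob() above, assuming the client also exposes append_block() as in the upstream SDK; the ContentSettings values are illustrative:

    from azure.storage.blob import ContentSettings

    blob.create_append_blob(
        content_settings=ContentSettings(content_type="text/plain"))
    blob.append_block(b"log line 1\n")  # blocks are appended to the end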
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 8 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return self._client.blob.create_snapshot(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _start_copy_from_url_options(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Any] + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + if 'source_lease' in kwargs: + source_lease = kwargs.pop('source_lease') + try: + headers['x-ms-source-lease-id'] = source_lease.id + except AttributeError: + headers['x-ms-source-lease-id'] = source_lease + + tier = kwargs.pop('premium_page_blob_tier', None) or kwargs.pop('standard_blob_tier', None) + tags = kwargs.pop('tags', None) + + # Options only available for sync copy + requires_sync = kwargs.pop('requires_sync', None) + encryption_scope_str = kwargs.pop('encryption_scope', None) + source_authorization = kwargs.pop('source_authorization', None) + # If tags is a str, interpret that as copy_source_tags + copy_source_tags = isinstance(tags, str) + + if incremental_copy: + if source_authorization: + raise ValueError("Source authorization tokens are not applicable for incremental copying.") + if copy_source_tags: + raise ValueError("Copying source tags is not applicable for incremental copying.") + + # TODO: refactor start_copy_from_url api in _blob_client.py. Call _generated/_blob_operations.py copy_from_url + # when requires_sync=True is set. + # Currently both sync copy and async copy are calling _generated/_blob_operations.py start_copy_from_url. + # As sync copy diverges more from async copy, more problems will surface.
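A sketch of taking and then reading a snapshot via create_snapshot() above; the returned property dict carries the snapshot ID under the 'snapshot' key, and the names are placeholders:

    snap = blob.create_snapshot()
    snapshot_reader = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "myblob",
        snapshot=snap["snapshot"])   # read-only view at the snapshot time
    print(snapshot_reader.get_blob_properties().size)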
+ if requires_sync is True: + headers['x-ms-requires-sync'] = str(requires_sync) + if encryption_scope_str: + headers['x-ms-encryption-scope'] = encryption_scope_str + if source_authorization: + headers['x-ms-copy-source-authorization'] = source_authorization + if copy_source_tags: + headers['x-ms-copy-source-tag-option'] = tags + else: + if encryption_scope_str: + raise ValueError( + "Encryption_scope is only supported for sync copy, please specify requires_sync=True") + if source_authorization: + raise ValueError( + "Source authorization tokens are only supported for sync copy, please specify requires_sync=True") + if copy_source_tags: + raise ValueError( + "Copying source tags is only supported for sync copy, please specify requires_sync=True") + + timeout = kwargs.pop('timeout', None) + dest_mod_conditions = get_modify_conditions(kwargs) + blob_tags_string = serialize_blob_tags_header(tags) if not copy_source_tags else None + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + options = { + 'copy_source': source_url, + 'seal_blob': kwargs.pop('seal_destination_blob', None), + 'timeout': timeout, + 'modified_access_conditions': dest_mod_conditions, + 'blob_tags_string': blob_tags_string, + 'headers': headers, + 'cls': return_response_headers, + } + if not incremental_copy: + source_mod_conditions = get_source_conditions(kwargs) + dest_access_conditions = get_access_conditions(kwargs.pop('destination_lease', None)) + options['source_modified_access_conditions'] = source_mod_conditions + options['lease_access_conditions'] = dest_access_conditions + options['tier'] = tier.value if tier else None + options.update(kwargs) + return options + + @distributed_trace + def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, **Any) -> Dict[str, Union[str, datetime]] + """Copies a blob from the given URL. + + This operation returns a dictionary containing `copy_status` and `copy_id`, + which can be used to check the status of or abort the copy operation. + `copy_status` will be 'success' if the copy completed synchronously or + 'pending' if the copy has been started asynchronously. For asynchronous copies, + the status can be checked by polling the :func:`get_blob_properties` method and + checking the copy status. Set `requires_sync` to True to force the copy to be synchronous. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. 
At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str) + :param bool incremental_copy: + Copies the snapshot of the source page blob to a destination page blob. + The snapshot is copied such that only the differential changes since + the previously copied snapshot are transferred to the destination. + The copied snapshots are complete copies of the original snapshot and + can be read or copied from as usual. Defaults to False. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_). + + The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob. + This option is only available when `incremental_copy=False` and `requires_sync=True`. + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) or Literal["COPY"] + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specifies whether a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. 
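A sketch of the asynchronous copy flow this docstring describes: start the copy, poll the copy status through get_blob_properties(), and optionally abort while it is still pending; account, container, and blob names are placeholders:

    import time

    dest = BlobClient.from_connection_string(
        "<connection-string>", "mycontainer", "myblob-copy")
    copy = dest.start_copy_from_url(
        "https://myaccount.blob.core.windows.net/mycontainer/myblob")
    while dest.get_blob_properties().copy.status == "pending":
        time.sleep(5)                     # async copy; poll until it settles
    # dest.abort_copy(copy["copy_id"])    # only valid while still pending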
+ :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. This option is only available when `incremental_copy` is + set to False and `requires_sync` is set to True. + + .. versionadded:: 12.9.0 + + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.10.0 + + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, Union[str, ~datetime.datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Copy a blob from a URL. + """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return self._client.page_blob.copy_incremental(**options) + return self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _abort_copy_options(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + options = { + 'copy_id': copy_id, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], **Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID string, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 12 + :caption: Abort copying a blob from URL. + """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], **Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. 
Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A BlobLeaseClient object. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace + def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. 
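A sketch of holding a lease across a mutating call, per acquire_lease() above; 15 seconds is the shortest finite lease duration the service accepts:

    lease = blob.acquire_lease(lease_duration=15)
    try:
        blob.set_blob_metadata({"locked": "true"}, lease=lease)
    finally:
        lease.release()   # always free the lease, even on failure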
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + if self.snapshot and kwargs.get('version_id'): + raise ValueError("Snapshot and version_id cannot be set at the same time") + try: + self._client.blob.set_tier( + tier=standard_blob_tier, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def _stage_block_options( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + block_id = encode_base64(str(block_id)) + if isinstance(data, str): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if isinstance(data, bytes): + data = data[:length] + + validate_content = kwargs.pop('validate_content', False) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'block_id': block_id, + 'content_length': length, + 'body': data, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. 
The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return self._client.block_blob.stage_block(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _stage_block_from_url_options( + self, block_id, # type: str + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + source_authorization = kwargs.pop('source_authorization', None) + if source_length is not None and source_offset is None: + raise ValueError("Source offset value must not be None if length is set.") + if source_length is not None: + source_length = source_offset + source_length - 1 + block_id = encode_base64(str(block_id)) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + range_header = None + if source_offset is not None: + range_header, _ = validate_and_format_range_headers(source_offset, source_length) + + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'copy_source_authorization': source_authorization, + 'block_id': block_id, + 'content_length': 0, + 'source_url': source_url, + 'source_range': range_header, + 'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + } + options.update(kwargs) + return options + + @distributed_trace + def stage_block_from_url( + self, block_id, # type: Union[str, int] + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param str source_url: The URL. + :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. 
To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + :returns: Blob property dict. + :rtype: dict[str, Any] + """ + options = self._stage_block_from_url_options( + block_id, + source_url=self._encode_source_url(source_url), + source_offset=source_offset, + source_length=source_length, + source_content_md5=source_content_md5, + **kwargs) + try: + return self._client.block_blob.stage_block_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + def _get_block_list_result(self, blocks): + # type: (BlockList) -> Tuple[List[BlobBlock], List[BlobBlock]] + committed = [] # type: List + uncommitted = [] # type: List + if blocks.committed_blocks: + committed = [BlobBlock._from_generated(b) for b in blocks.committed_blocks] # pylint: disable=protected-access + if blocks.uncommitted_blocks: + uncommitted = [BlobBlock._from_generated(b) for b in blocks.uncommitted_blocks] # pylint: disable=protected-access + return committed, uncommitted + + @distributed_trace + def get_block_list(self, block_list_type="committed", **kwargs): + # type: (Optional[str], **Any) -> Tuple[List[BlobBlock], List[BlobBlock]] + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param str block_list_type: + Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A tuple of two lists - committed and uncommitted blocks + :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + try: + blocks = self._client.block_blob.get_block_list( + list_type=block_list_type, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return self._get_block_list_result(blocks) + + def _commit_block_list_options( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[]) + for block in block_list: + try: + if block.state.value == 'committed': + block_lookup.committed.append(encode_base64(str(block.id))) + elif block.state.value == 'uncommitted': + block_lookup.uncommitted.append(encode_base64(str(block.id))) + else: + block_lookup.latest.append(encode_base64(str(block.id))) + except AttributeError: + block_lookup.latest.append(encode_base64(str(block))) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + blob_headers = None + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if content_settings: + blob_headers = BlobHTTPHeaders( + blob_cache_control=content_settings.cache_control, + blob_content_type=content_settings.content_type, + blob_content_md5=content_settings.content_md5, + blob_content_encoding=content_settings.content_encoding, + blob_content_language=content_settings.content_language, + blob_content_disposition=content_settings.content_disposition + ) + + validate_content = kwargs.pop('validate_content', False) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + immutability_policy = kwargs.pop('immutability_policy', None) + if immutability_policy: + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = serialize_blob_tags_header(kwargs.pop('tags', None)) + + options = { + 'blocks': block_lookup, + 'blob_http_headers': blob_headers, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None), + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'tier': tier.value if tier else None, + 'blob_tags_string': blob_tags_string, + 'headers': headers + } + options.update(kwargs) + return options + + @distributed_trace + def commit_block_list( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Commit Block List operation writes a blob by specifying the list of + block IDs that make up the blob. + + :param list block_list: + List of Blockblobs. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict[str, str] + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. 
+ Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on destination blob with a matching value. + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return self._client.block_blob.commit_block_list(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if premium_page_blob_tier is None: + raise ValueError("A PremiumPageBlobTier must be specified") + try: + self._client.blob.set_tier( + tier=premium_page_blob_tier, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def _set_blob_tags_options(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + tags = serialize_blob_tags(tags) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + + options = { + 'tags': tags, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def set_blob_tags(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. + Each call to this operation replaces all existing tags attached to the blob. To remove all + tags from the blob, call this operation with no tags set. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. 
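+
+            For example, a minimal illustrative sketch (``blob`` is an assumed,
+            pre-existing ``BlobClient``; the tag names are invented)::
+
+                # Replaces any existing tags with exactly these two.
+                blob.set_blob_tags({"project": "demo", "stage": "test"})
+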
+            Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :type tags: dict(str, str)
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to add tags to.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the tags content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        options = self._set_blob_tags_options(tags=tags, **kwargs)
+        try:
+            return self._client.blob.set_tags(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_blob_tags_options(self, **kwargs):
+        # type: (**Any) -> Dict[str, str]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+
+        options = {
+            'version_id': kwargs.pop('version_id', None),
+            'snapshot': self.snapshot,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'timeout': kwargs.pop('timeout', None),
+            'cls': return_headers_and_deserialized}
+        return options
+
+    @distributed_trace
+    def get_blob_tags(self, **kwargs):
+        # type: (**Any) -> Dict[str, str]
+        """The Get Tags operation enables users to get tags on a blob or specific blob version, or snapshot.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get tags from.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on destination blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Key value pairs of blob tags.
+ :rtype: Dict[str, str] + """ + options = self._get_blob_tags_options(**kwargs) + try: + _, tags = self._client.blob.get_tags(**options) + return parse_tags(tags) # pylint: disable=protected-access + except HttpResponseError as error: + process_storage_error(error) + + def _get_page_ranges_options( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + if length is not None: + length = offset + length - 1 # Reformat to an inclusive range index + page_range, _ = validate_and_format_range_headers( + offset, length, start_range_required=False, end_range_required=False, align_to_page=True + ) + options = { + 'snapshot': self.snapshot, + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'timeout': kwargs.pop('timeout', None), + 'range': page_range} + if previous_snapshot_diff: + try: + options['prevsnapshot'] = previous_snapshot_diff.snapshot # type: ignore + except AttributeError: + try: + options['prevsnapshot'] = previous_snapshot_diff['snapshot'] # type: ignore + except TypeError: + options['prevsnapshot'] = previous_snapshot_diff + options.update(kwargs) + return options + + @distributed_trace + def get_page_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. + + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param str previous_snapshot_diff: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous blob snapshot to be compared + against a more recent snapshot or the current blob. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, str)), list(dict(str, str)))
+        """
+        warnings.warn(
+            "get_page_ranges is deprecated, use list_page_ranges instead",
+            DeprecationWarning
+        )
+
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace
+    def list_page_ranges(
+        self,
+        *,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> ItemPaged[PageRange]:
+        """Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob. If `previous_snapshot` is specified, the result will be
+        a diff of changes between the target blob and the previous snapshot.
+
+        :keyword int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :keyword int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :keyword previous_snapshot:
+            A snapshot value that specifies that the response will contain only pages that were changed
+            between target blob and previous snapshot. Changed pages include both updated and cleared
+            pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot`
+            is the older of the two.
+        :paramtype previous_snapshot: str or Dict[str, Any]
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int results_per_page: + The maximum number of page ranges to retrieve per API call. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) of PageRange. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.PageRange] + """ + results_per_page = kwargs.pop('results_per_page', None) + options = self._get_page_ranges_options( + offset=offset, + length=length, + previous_snapshot_diff=previous_snapshot, + **kwargs) + + if previous_snapshot: + command = partial( + self._client.page_blob.get_page_ranges_diff, + **options) + else: + command = partial( + self._client.page_blob.get_page_ranges, + **options) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=PageRangePaged) + + @distributed_trace + def get_page_range_diff_for_managed_disk( + self, previous_snapshot_url, # type: str + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a managed disk or snapshot. + + .. note:: + This operation is only available for managed disk accounts. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-07-07'. + + :param previous_snapshot_url: + Specifies the URL of a previous snapshot of the managed disk. + The response will only contain pages that were changed between the target blob and + its previous snapshot. + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. 
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges, the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, str)), list(dict(str, str)))
+        """
+        options = self._get_page_ranges_options(
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    def _set_sequence_number_options(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Any]
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if sequence_number_action is None:
+            raise ValueError("A sequence number action must be specified")
+        options = {
+            'sequence_number_action': sequence_number_action,
+            'timeout': kwargs.pop('timeout', None),
+            'blob_sequence_number': sequence_number,
+            'lease_access_conditions': access_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def set_sequence_number(self, sequence_number_action, sequence_number=None, **kwargs):
+        # type: (Union[str, SequenceNumberAction], Optional[str], **Any) -> Dict[str, Union[str, datetime]]
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
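+
+            For example, a minimal illustrative sketch (``blob`` is an assumed,
+            pre-existing page blob ``BlobClient``)::
+
+                # "increment" bumps the sequence number by one; this action
+                # does not take an explicit sequence_number value.
+                blob.set_sequence_number("increment")
+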
+ :param str sequence_number: + This property sets the blob's sequence number. The sequence number is a + user-controlled property that you can use to track requests and manage + concurrency issues. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _resize_blob_options(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if size is None: + raise ValueError("A content length must be specified for a Page Blob.") + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'blob_content_length': size, + 'timeout': kwargs.pop('timeout', None), + 'lease_access_conditions': access_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def resize_blob(self, size, **kwargs): + # type: (int, **Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. 
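+
+        For example, a minimal illustrative sketch (``blob`` is an assumed,
+        pre-existing page blob ``BlobClient``)::
+
+            # Grow the blob to 1 MiB; page blob sizes must be 512-byte aligned.
+            blob.resize_blob(1024 * 1024)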
+ + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_page_options( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if isinstance(page, str): + page = page.encode(kwargs.pop('encoding', 'UTF-8')) + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) # type: ignore + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + validate_content = kwargs.pop('validate_content', False) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': page[:length], + 'content_length': length, + 'transactional_content_md5': None, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. 
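+
+            For example, a minimal illustrative sketch (``blob`` is an assumed,
+            pre-existing page blob ``BlobClient`` and ``data`` a 512-byte payload)::
+
+                # Write the first page only if the current sequence number is <= 5.
+                blob.upload_page(data, offset=0, length=512, if_sequence_number_lte=5)
+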
+ :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return self._client.page_blob.upload_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _upload_pages_from_url_options( # type: ignore + self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) 
-> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        # TODO: extract the code to a method format_range
+        if offset is None or offset % 512 != 0:
+            raise ValueError("offset must be an integer that aligns with 512 page size")
+        if length is None or length % 512 != 0:
+            raise ValueError("length must be an integer that aligns with 512 page size")
+        if source_offset is None or source_offset % 512 != 0:
+            raise ValueError("source_offset must be an integer that aligns with 512 page size")
+
+        # Format range
+        end_range = offset + length - 1
+        destination_range = 'bytes={0}-{1}'.format(offset, end_range)
+        source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1)  # inclusive end, hence the -1
+
+        seq_conditions = SequenceNumberAccessConditions(
+            if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None),
+            if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None),
+            if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None)
+        )
+        source_authorization = kwargs.pop('source_authorization', None)
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        source_mod_conditions = get_source_conditions(kwargs)
+        cpk_scope_info = get_cpk_scope_info(kwargs)
+        source_content_md5 = kwargs.pop('source_content_md5', None)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+
+        options = {
+            'copy_source_authorization': source_authorization,
+            'source_url': source_url,
+            'content_length': 0,
+            'source_range': source_range,
+            'range': destination_range,
+            'source_content_md5': bytearray(source_content_md5) if source_content_md5 else None,
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'sequence_number_access_conditions': seq_conditions,
+            'modified_access_conditions': mod_conditions,
+            'source_modified_access_conditions': source_mod_conditions,
+            'cpk_scope_info': cpk_scope_info,
+            'cpk_info': cpk_info,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def upload_pages_from_url(self, source_url,  # type: str
+                              offset,  # type: int
+                              length,  # type: int
+                              source_offset,  # type: int
+                              **kwargs
+                              ):
+        # type: (...) -> Dict[str, Any]
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned with 512-byte boundaries, the start offset
+            must be a modulus of 512 and the length must be a modulus of
+            512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
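+
+            For example, a minimal illustrative sketch (``blob`` is an assumed,
+            pre-existing page blob ``BlobClient`` and ``src_url`` an accessible source URL)::
+
+                # Copy the first 512 bytes of the source into the blob's first page.
+                blob.upload_pages_from_url(src_url, offset=0, length=512, source_offset=0)
+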
+ The service will read the same number of bytes as the destination range (length-offset). + :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. 
+ As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _clear_page_options(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + seq_conditions = SequenceNumberAccessConditions( + if_sequence_number_less_than_or_equal_to=kwargs.pop('if_sequence_number_lte', None), + if_sequence_number_less_than=kwargs.pop('if_sequence_number_lt', None), + if_sequence_number_equal_to=kwargs.pop('if_sequence_number_eq', None) + ) + mod_conditions = get_modify_conditions(kwargs) + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 page size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 page size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'content_length': 0, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range, + 'lease_access_conditions': access_conditions, + 'sequence_number_access_conditions': seq_conditions, + 'modified_access_conditions': mod_conditions, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def clear_page(self, offset, length, **kwargs): + # type: (int, int, **Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. 
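+
+            For example, a minimal illustrative sketch (``blob`` is an assumed,
+            pre-existing page blob ``BlobClient``)::
+
+                # Zero out the first page; offset and length are 512-byte aligned.
+                blob.clear_page(offset=0, length=512)
+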
+ :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _append_block_options( # type: ignore + self, data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) 
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + if isinstance(data, str): + data = data.encode(kwargs.pop('encoding', 'UTF-8')) # type: ignore + if length is None: + length = get_length(data) + if length is None: + length, data = read_length(data) + if length == 0: + return {} + if isinstance(data, bytes): + data = data[:length] + + appendpos_condition = kwargs.pop('appendpos_condition', None) + maxsize_condition = kwargs.pop('maxsize_condition', None) + validate_content = kwargs.pop('validate_content', False) + append_conditions = None + if maxsize_condition or appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + max_size=maxsize_condition, + append_position=appendpos_condition + ) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + options = { + 'body': data, + 'content_length': length, + 'timeout': kwargs.pop('timeout', None), + 'transactional_content_md5': None, + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'validate_content': validate_content, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def append_block( # type: ignore + self, data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. 
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        options = self._append_block_options(
+            data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return self._client.append_blob.append_block(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _append_block_from_url_options(  # type: ignore
+        self, copy_source_url,  # type: str
+        source_offset=None,  # type: Optional[int]
+        source_length=None,  # type: Optional[int]
+        **kwargs
+    ):
+        # type: (...)
-> Dict[str, Any] + if self.require_encryption or (self.key_encryption_key is not None): + raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION) + + # If end range is provided, start range must be provided + if source_length is not None and source_offset is None: + raise ValueError("source_offset should also be specified if source_length is specified") + # Format based on whether length is present + source_range = None + if source_length is not None: + end_range = source_offset + source_length - 1 + source_range = 'bytes={0}-{1}'.format(source_offset, end_range) + elif source_offset is not None: + source_range = "bytes={0}-".format(source_offset) + + appendpos_condition = kwargs.pop('appendpos_condition', None) + maxsize_condition = kwargs.pop('maxsize_condition', None) + source_content_md5 = kwargs.pop('source_content_md5', None) + append_conditions = None + if maxsize_condition or appendpos_condition is not None: + append_conditions = AppendPositionAccessConditions( + max_size=maxsize_condition, + append_position=appendpos_condition + ) + source_authorization = kwargs.pop('source_authorization', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + source_mod_conditions = get_source_conditions(kwargs) + cpk_scope_info = get_cpk_scope_info(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + + options = { + 'copy_source_authorization': source_authorization, + 'source_url': copy_source_url, + 'content_length': 0, + 'source_range': source_range, + 'source_content_md5': source_content_md5, + 'transactional_content_md5': None, + 'lease_access_conditions': access_conditions, + 'append_position_access_conditions': append_conditions, + 'modified_access_conditions': mod_conditions, + 'source_modified_access_conditions': source_mod_conditions, + 'cpk_scope_info': cpk_scope_info, + 'cpk_info': cpk_info, + 'cls': return_response_headers, + 'timeout': kwargs.pop('timeout', None)} + options.update(kwargs) + return options + + @distributed_trace + def append_block_from_url(self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """ + Creates a new block to be committed as part of a blob, where the contents are read from a source url. + + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_offset: + This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source. + :param int source_length: + This indicates the end of the range of bytes that has to be taken from the copy source. + :keyword bytearray source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. 
If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        """
+        options = self._append_block_from_url_options(
+            copy_source_url=self._encode_source_url(copy_source_url),
+            source_offset=source_offset,
+            source_length=source_length,
+            **kwargs
+        )
+        try:
+            return self._client.append_blob.append_block_from_url(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _seal_append_blob_options(self, **kwargs):
+        # type: (...) -> Dict[str, Any]
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+
+        appendpos_condition = kwargs.pop('appendpos_condition', None)
+        append_conditions = None
+        if appendpos_condition is not None:
+            append_conditions = AppendPositionAccessConditions(
+                append_position=appendpos_condition
+            )
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+
+        options = {
+            'timeout': kwargs.pop('timeout', None),
+            'lease_access_conditions': access_conditions,
+            'append_position_access_conditions': append_conditions,
+            'modified_access_conditions': mod_conditions,
+            'cls': return_response_headers}
+        options.update(kwargs)
+        return options
+
+    @distributed_trace
+    def seal_append_blob(self, **kwargs):
+        # type: (...) -> Dict[str, Union[str, datetime, int]]
+        """The Seal operation seals the Append Blob to make it read-only.
+
+        .. versionadded:: 12.4.0
+
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
+        """
+        options = self._seal_append_blob_options(**kwargs)
+        try:
+            return self._client.append_blob.seal(**options)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def _get_container_client(self):  # pylint: disable=client-method-missing-kwargs
+        # type: (...) -> ContainerClient
+        """Get a client to interact with the blob's parent container.
+
+        The container need not already exist. Defaults to the current blob's credentials.
+
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers.py
+                :start-after: [START get_container_client_from_blob_client]
+                :end-before: [END get_container_client_from_blob_client]
+                :language: python
+                :dedent: 8
+                :caption: Get container client from blob object.
+        """
+        from ._container_client import ContainerClient
+        if not isinstance(self._pipeline._transport, TransportWrapper):  # pylint: disable = protected-access
+            _pipeline = Pipeline(
+                transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline  # pylint: disable = protected-access
+        return ContainerClient(
+            "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_service_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_service_client.py
new file mode 100644
index 00000000000..8db8b22077c
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_blob_service_client.py
@@ -0,0 +1,782 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +import functools +import warnings +from typing import ( + Any, Dict, List, Optional, Union, + TYPE_CHECKING +) +from urllib.parse import urlparse + +from typing_extensions import Self + +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.models import LocationMode +from ._shared.parser import _to_utc_datetime +from ._shared.response_handlers import ( + return_response_headers, + process_storage_error, + parse_to_internal_user_delegation_key +) +from ._generated import AzureBlobStorage +from ._generated.models import StorageServiceProperties, KeyInfo +from ._container_client import ContainerClient +from ._blob_client import BlobClient +from ._deserialize import service_stats_deserialize, service_properties_deserialize +from ._encryption import StorageEncryptionMixin +from ._list_blobs_helper import FilteredBlobPaged +from ._models import ContainerPropertiesPaged +from ._serialize import get_api_version + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from ._shared.models import UserDelegationKey + from ._lease import BlobLeaseClient + from ._models import ( + ContainerProperties, + BlobProperties, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + CorsRule, + RetentionPolicy, + StaticWebsite, + FilteredBlob + ) + + +class BlobServiceClient(StorageAccountHostsMixin, StorageEncryptionMixin): + """A client to interact with the Blob Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete containers within the account. + For operations relating to a specific container or blob, clients for those entities + can also be retrieved using the `get_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URL to the blob storage account. Any other entities included + in the URL path (e.g. container or blob) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. 
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client] + :end-before: [END create_blob_service_client] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with account url and credential. + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_blob_service_client_oauth] + :end-before: [END create_blob_service_client_oauth] + :language: python + :dedent: 8 + :caption: Creating the BlobServiceClient with Azure Identity credentials. + """ + + def __init__( + self, account_url: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self._query_str, credential = self._format_query_string(sas_token, credential) + super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._configure_encryption(kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create BlobServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. 
The value can be a SAS token string,
+            an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :returns: A Blob service client.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace
+    def get_user_delegation_key(self, key_start_time,  # type: datetime
+                                key_expiry_time,  # type: datetime
+                                **kwargs  # type: Any
+                                ):
+        # type: (...) -> UserDelegationKey
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                               timeout=timeout,
+                                                                               **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
+    @distributed_trace
+    def get_account_information(self, **kwargs):
+        # type: (Any) -> Dict[str, str]
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 8
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return self._client.service.get_account_info(cls=return_response_headers, **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_stats(self, **kwargs):
+        # type: (**Any) -> Dict[str, Any]
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage maintains your data durably
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location, if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :return: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 8
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = self._client.service.get_statistics(  # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def get_service_properties(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Getting service properties for the blob service.
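+
+        A minimal inline sketch of reading the returned settings follows (an editor's
+        addition, not one of the shipped samples; the account URL is a placeholder, the
+        credential assumes the ``azure-identity`` package, and the key names shown are
+        assumed from the deserialized response shape):
+
+        .. code-block:: python
+
+            from azure.identity import DefaultAzureCredential
+
+            # Hypothetical account URL; substitute your own.
+            service = BlobServiceClient(
+                "https://myaccount.blob.core.windows.net",
+                credential=DefaultAzureCredential())
+            props = service.get_service_properties()
+            # The result is a plain dict of service settings, e.g. the
+            # delete retention policy object and its configuration.
+            retention = props["delete_retention_policy"]
+            print(retention.enabled, retention.days)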
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def set_service_properties(
+            self, analytics_logging=None,  # type: Optional[BlobAnalyticsLogging]
+            hour_metrics=None,  # type: Optional[Metrics]
+            minute_metrics=None,  # type: Optional[Metrics]
+            cors=None,  # type: Optional[List[CorsRule]]
+            target_version=None,  # type: Optional[str]
+            delete_retention_policy=None,  # type: Optional[RetentionPolicy]
+            static_website=None,  # type: Optional[StaticWebsite]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 8
+                :caption: Setting service properties for the blob service.
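+
+        A minimal inline sketch (an editor's addition; ``service`` is assumed to be an
+        existing BlobServiceClient, created as in the samples above) showing how one
+        setting can be updated while the rest are preserved:
+
+        .. code-block:: python
+
+            from azure.storage.blob import RetentionPolicy
+
+            # Only delete_retention_policy is passed, so all other settings keep
+            # their current values on the service.
+            service.set_service_properties(
+                delete_retention_policy=RetentionPolicy(enabled=True, days=7))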
+        """
+        if all(parameter is None for parameter in [
+                analytics_logging, hour_metrics, minute_metrics, cors,
+                target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=cors,
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+            self, name_starts_with=None,  # type: Optional[str]
+            include_metadata=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> ItemPaged[ContainerProperties]
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is only for accounts
+            with container restore enabled. The default value is `False`.
+            .. versionadded:: 12.4.0
+        :keyword bool include_system:
+            Flag specifying that system containers should be included.
+            .. versionadded:: 12.10.0
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 12
+                :caption: Listing the containers in the blob service.
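+
+        A minimal inline sketch (an editor's addition; the prefix is illustrative and
+        ``service`` is assumed to be an existing BlobServiceClient):
+
+        .. code-block:: python
+
+            # Lazily pages through every container whose name starts with "test",
+            # fetching metadata alongside each entry.
+            for container in service.list_containers(name_starts_with="test", include_metadata=True):
+                print(container.name, container.metadata)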
+        """
+        include = ['metadata'] if include_metadata else []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        include_system = kwargs.pop('include_system', None)
+        if include_system:
+            include.append("system")
+
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_containers_segment,
+            prefix=name_starts_with,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            page_iterator_class=ContainerPropertiesPaged
+        )
+
+    @distributed_trace
+    def find_blobs_by_tags(self, filter_expression, **kwargs):
+        # type: (str, **Any) -> ItemPaged[FilteredBlob]
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression. Filter blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To specify a container, eg. "@container='containerName' and \"Name\"='C'"
+        :keyword int results_per_page:
+            The maximum number of results per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob]
+        """
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.service.filter_blobs,
+            where=filter_expression,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace
+    def create_container(
+            self, name,  # type: str
+            metadata=None,  # type: Optional[Dict[str, str]]
+            public_access=None,  # type: Optional[Union[PublicAccess, str]]
+            **kwargs
+        ):
+        # type: (...) -> ContainerClient
+        """Creates a new container under the specified account.
+
+        If the container with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created container.
+
+        :param str name: The name of the container to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: 'container', 'blob'.
+        :type public_access: str or ~azure.storage.blob.PublicAccess
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_create_container]
+                :end-before: [END bsc_create_container]
+                :language: python
+                :dedent: 12
+                :caption: Creating a container in the blob service.
+        """
+        container = self.get_container_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container.create_container(
+            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+        return container
+
+    @distributed_trace
+    def delete_container(
+            self, container,  # type: Union[ContainerProperties, str]
+            lease=None,  # type: Optional[Union[BlobLeaseClient, str]]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified container for deletion.
+
+        The container and any blobs contained within it are later deleted during garbage collection.
+        If the container is not found, a ResourceNotFoundError will be raised.
+
+        :param container:
+            The container to delete. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 12
+                :caption: Deleting a container in the blob service.
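+
+        A minimal inline sketch (an editor's addition; the container name is illustrative
+        and ``service`` is assumed to be an existing BlobServiceClient):
+
+        .. code-block:: python
+
+            from azure.core.exceptions import ResourceNotFoundError
+
+            try:
+                service.delete_container("processed-uploads")
+            except ResourceNotFoundError:
+                # The container was already gone; treat the delete as a no-op.
+                pass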
+        """
+        container = self.get_container_client(container)  # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        container.delete_container(  # type: ignore
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
+    @distributed_trace
+    def _rename_container(self, name, new_name, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Renames a container.
+
+        Operation is successful only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new name for the container.
+        :keyword lease:
+            Specify this to perform only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id  # type: str
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container._client.container.rename(name, **kwargs)  # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs):
+        # type: (str, str, **Any) -> ContainerClient
+        """Restores a soft-deleted container.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        if new_name:
+            warnings.warn("`new_name` is no longer supported.", DeprecationWarning)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            container._client.container.restore(deleted_container_name=deleted_container_name,  # pylint: disable = protected-access
+                                                deleted_container_version=deleted_container_version,
+                                                timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_container_client(self, container):
+        # type: (Union[ContainerProperties, str]) -> ContainerClient
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+ :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 8 + :caption: Getting the container client to interact with a specific container. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by :func:`~azure.storage.blob.BlobClient.create_snapshot()`. + :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 12 + :caption: Getting the blob client to interact with a specific blob. 
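+
+        A minimal inline sketch (an editor's addition; the container and blob names are
+        illustrative and ``service`` is assumed to be an existing BlobServiceClient):
+
+        .. code-block:: python
+
+            blob = service.get_blob_client("mycontainer", "data/report.csv")
+            # Constructing the client makes no network call; the request happens
+            # only when the blob is actually read.
+            content = blob.download_blob().readall()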
+ """ + try: + container_name = container.name + except AttributeError: + container_name = container + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_container_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_container_client.py new file mode 100644 index 00000000000..15e45143a52 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_container_client.py @@ -0,0 +1,1769 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +from typing import ( + Any, AnyStr, Dict, List, IO, Iterable, Iterator, Optional, overload, Union, + TYPE_CHECKING +) +from urllib.parse import urlparse, quote, unquote + +from typing_extensions import Self + +from azure.core import MatchConditions +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized +) +from ._generated import AzureBlobStorage +from ._generated.models import SignedIdentifier +from ._blob_client import BlobClient +from ._deserialize import deserialize_container_properties +from ._download import StorageStreamDownloader +from ._encryption import StorageEncryptionMixin +from ._lease import BlobLeaseClient +from ._list_blobs_helper import ( + BlobNamesPaged, + BlobPrefix, + BlobPropertiesPaged, + FilteredBlobPaged, + IgnoreListBlobsDeserializer +) +from ._models import ( + ContainerProperties, + BlobProperties, + BlobType, + FilteredBlob +) +from ._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from ._models import ( # pylint: disable=unused-import + PublicAccess, + AccessPolicy, + StandardBlobTier, + PremiumPageBlobTier) + + +def _get_blob_name(blob): + """Return the blob name. 
+ + :param blob: A blob string or BlobProperties + :rtype: str + """ + try: + return blob.get('name') + except AttributeError: + return blob + + +class ContainerClient(StorageAccountHostsMixin, StorageEncryptionMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific container, although that container + may not yet exist. + + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_from_service] + :end-before: [END create_container_client_from_service] + :language: python + :dedent: 8 + :caption: Get a ContainerClient from an existing BlobServiceClient. + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container_client_sasurl] + :end-before: [END create_container_client_sasurl] + :language: python + :dedent: 8 + :caption: Creating the container client directly. + """ + def __init__( + self, account_url: str, + container_name: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not container_name: + raise ValueError("Please specify a container name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + self.container_name = container_name + # This parameter is used for the hierarchy traversal. Give precedence to credential. + self._raw_credential = credential if credential else sas_token + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs) + self._api_version = get_api_version(kwargs) + self._client = self._build_generated_client() + self._configure_encryption(kwargs) + + def _build_generated_client(self): + client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline) + client._config.version = self._api_version # pylint: disable=protected-access + return client + + def _format_url(self, hostname): + container_name = self.container_name + if isinstance(container_name, str): + container_name = container_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(container_name), + self._query_str) + + @classmethod + def from_container_url( + cls, container_url: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ContainerClient from a container url. + + :param str container_url: + The full endpoint URL to the Container, including SAS token if used. This could be + either the primary endpoint, or the secondary endpoint depending on the current `location_mode`. + :type container_url: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A container client. 
+ :rtype: ~azure.storage.blob.ContainerClient + """ + try: + if not container_url.lower().startswith('http'): + container_url = "https://" + container_url + except AttributeError: + raise ValueError("Container URL must be a string.") + parsed_url = urlparse(container_url) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(container_url)) + + container_path = parsed_url.path.strip('/').split('/') + account_path = "" + if len(container_path) > 1: + account_path = "/" + "/".join(container_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + container_name = unquote(container_path[-1]) + if not container_name: + raise ValueError("Invalid URL. Please provide a URL with a valid container name") + return cls(account_url, container_name=container_name, credential=credential, **kwargs) + + @classmethod + def from_connection_string( + cls, conn_str: str, + container_name: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ContainerClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param container_name: + The container name for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token, or the connection string already has shared + access key values. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + Credentials provided here will take precedence over those in the connection string. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A container client. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START auth_from_connection_string_container] + :end-before: [END auth_from_connection_string_container] + :language: python + :dedent: 8 + :caption: Creating the ContainerClient from a connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, container_name=container_name, credential=credential, **kwargs) + + @distributed_trace + def create_container(self, metadata=None, public_access=None, **kwargs): + # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> Dict[str, Union[str, datetime]] + """ + Creates a new container under the specified account. If the container + with the same name already exists, the operation fails. + + :param metadata: + A dict with name_value pairs to associate with the + container as metadata. Example:{'Category':'test'} + :type metadata: dict[str, str] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. 
versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: A dictionary of response headers. + :rtype: Dict[str, Union[str, datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START create_container] + :end-before: [END create_container] + :language: python + :dedent: 12 + :caption: Creating a container to store blobs. + """ + headers = kwargs.pop('headers', {}) + timeout = kwargs.pop('timeout', None) + headers.update(add_metadata_headers(metadata)) # type: ignore + container_cpk_scope_info = get_container_cpk_scope_info(kwargs) + try: + return self._client.container.create( # type: ignore + timeout=timeout, + access=public_access, + container_cpk_scope_info=container_cpk_scope_info, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _rename_container(self, new_name, **kwargs): + # type: (str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: ~azure.storage.blob.ContainerClient + """ + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container = ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) + renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value.
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 12 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time.
+ :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: A BlobLeaseClient object that can be run in a context manager. + :rtype: ~azure.storage.blob.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 8 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace + def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_container_properties(self, **kwargs): + # type: (Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_properties] + :end-before: [END get_container_properties] + :language: python + :dedent: 12 + :caption: Getting properties on the container.
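+ + A minimal usage sketch (the account URL, container name and credential below are illustrative placeholders, not values from this codebase): + + .. code-block:: python + + container_client = ContainerClient( + "https://myaccount.blob.core.windows.net", + container_name="mycontainer", + credential="<account-key-or-sas-token>") + properties = container_client.get_container_properties() + print(properties.name, properties.last_modified)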
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response = self._client.container.get_properties( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=deserialize_container_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + response.name = self.container_name + return response # type: ignore + + @distributed_trace + def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a container exists and returns False otherwise. + + :kwarg int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: boolean + """ + try: + self._client.container.get_properties(**kwargs) + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace + def set_container_metadata( # type: ignore + self, metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + container. Each call to this operation replaces all existing metadata + attached to the container. To remove all metadata from the container, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the container as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or datetime] + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_metadata] + :end-before: [END set_container_metadata] + :language: python + :dedent: 12 + :caption: Setting metadata on the container. + """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to the current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client import BlobServiceClient + if not isinstance(self._pipeline._transport, TransportWrapper): # pylint: disable = protected-access + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, _pipeline=_pipeline) + + @distributed_trace + def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 12 + :caption: Getting the access policy on the container.
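+ + A short sketch of reading the returned dict (assumes an existing ContainerClient instance named container_client): + + .. code-block:: python + + policy = container_client.get_container_access_policy() + print(policy['public_access']) + for identifier in policy['signed_identifiers']: + print(identifier.id)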
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 12 + :caption: Setting access policy on the container. + """ + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + lease = kwargs.pop('lease', None) + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + return self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. + :type include: list[str] or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 8 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged)
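+ + # Illustrative sketch only (not part of the vendored SDK): list blobs whose + # names start with a given prefix, assuming an existing ContainerClient + # instance named container_client. + # for blob in container_client.list_blobs(name_starts_with="logs/"): + # print(blob.name)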
+ + @distributed_trace + def list_blob_names(self, **kwargs: Any) -> ItemPaged[str]: + """Returns a generator to list the names of blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + Note that no additional properties or metadata will be returned when using this API. + Additionally, this API does not have an option to include additional blobs such as snapshots, + versions, soft-deleted blobs, etc. To get any of this data, use :func:`list_blobs()`. + + :keyword str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An iterable (auto-paging) response of blob names as strings. + :rtype: ~azure.core.paging.ItemPaged[str] + """ + name_starts_with = kwargs.pop('name_starts_with', None) + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + + # For listing only names we need to create a one-off generated client and + # override its deserializer to prevent deserialization of the full response. + client = self._build_generated_client() + client.container._deserialize = IgnoreListBlobsDeserializer() # pylint: disable=protected-access + + command = functools.partial( + client.container.list_blob_flat_segment, + timeout=timeout, + **kwargs) + return ItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobNamesPaged) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Union[List[str], str]] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> ItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. + :type include: list[str] or str + :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter)
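+ + # Illustrative sketch only: walk one level of a virtual directory hierarchy, + # assuming an existing ContainerClient instance named container_client. + # BlobPrefix items represent sub-directories; their names end with the delimiter. + # for item in container_client.walk_blobs(delimiter="/"): + # print(item.name)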
+ + @distributed_trace + def find_blobs_by_tags( + self, filter_expression, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> ItemPaged[FilteredBlob] + """Returns a generator to list the blobs under the specified container whose tags + match the given search expression. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str filter_expression: + The expression to find blobs whose tags match the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An iterable (auto-paging) response of FilteredBlob. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.FilteredBlob] + """ + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.filter_blobs, + timeout=timeout, + where=filter_expression, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace + def upload_blob( + self, name: Union[str, BlobProperties], + data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]], + blob_type: Union[str, BlobType] = BlobType.BlockBlob, + length: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs + ) -> BlobClient: + """Creates a new blob from a data source with automatic chunking. + + :param name: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type name: str or ~azure.storage.blob.BlobProperties + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If overwrite=True is set, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob.
Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually. + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service.
An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword progress_hook: + A callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transferred + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], None] + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 8 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob
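+ + # Illustrative sketch only: upload a local file and print the resulting blob + # URL (the file name and container_client are placeholders, not from this codebase). + # with open("data.bin", "rb") as stream: + # blob_client = container_client.upload_blob( + # name="data.bin", data=stream, overwrite=True) + # print(blob_client.url)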
+ + @distributed_trace + def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for the specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + A soft-deleted blob or snapshot is accessible through :func:`list_blobs()` specifying the `include=["deleted"]` + option. A soft-deleted blob or snapshot can be restored using :func:`~azure.storage.blob.BlobClient.undelete()` + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blob's snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + blob_client.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @overload + def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: str, + **kwargs) -> StorageStreamDownloader[str]: + ... + + @overload + def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: None = None, + **kwargs) -> StorageStreamDownloader[bytes]: + ... + + @distributed_trace + def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: Optional[str] = None, + **kwargs) -> StorageStreamDownloader: + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob.
Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword progress_hook: + A callback to track the progress of a long running download. The signature is + function(current: int, total: int) where current is the number of bytes transferred + so far, and total is the total size of the download. + :paramtype progress_hook: Callable[[int, int], None] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually.
+ :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.StorageStreamDownloader + """ + blob_client = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + return blob_client.download_blob( + offset=offset, + length=length, + encoding=encoding, + **kwargs) + + def _generate_delete_blobs_subrequest_options( + self, snapshot=None, + version_id=None, + delete_snapshots=None, + lease_access_conditions=None, + modified_access_conditions=None, + **kwargs + ): + """This code is a copy from _generated. + + Once Autorest is able to provide request preparation this code should be removed. + """ + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + if_tags = None + if modified_access_conditions is not None: + if_tags = modified_access_conditions.if_tags + + # Construct parameters + timeout = kwargs.pop('timeout', None) + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + + # Construct headers + header_parameters = {} + if delete_snapshots is not None: + header_parameters['x-ms-delete-snapshots'] = self._client._serialize.header( # pylint: disable=protected-access + "delete_snapshots", delete_snapshots, 'DeleteSnapshotsOptionType') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header( # pylint: disable=protected-access + "lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._client._serialize.header( # pylint: disable=protected-access + "if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._client._serialize.header( # pylint: disable=protected-access + "if_none_match", if_none_match, 'str') + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_delete_blobs_options( + self, *blobs: Union[str, Dict[str, Any], BlobProperties], + **kwargs: Any + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = 
kwargs.pop('raise_on_any_failure', True) + delete_snapshots = kwargs.pop('delete_snapshots', None) + if_modified_since = kwargs.pop('if_modified_since', None) + if_unmodified_since = kwargs.pop('if_unmodified_since', None) + if_tags_match_condition = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "", + 'path': self.container_name, + 'restype': 'restype=container&' + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access + snapshot=blob.get('snapshot'), + version_id=blob.get('version_id'), + delete_snapshots=delete_snapshots or blob.get('delete_snapshots'), + lease=blob.get('lease_id'), + if_modified_since=if_modified_since or blob.get('if_modified_since'), + if_unmodified_since=if_unmodified_since or blob.get('if_unmodified_since'), + etag=blob.get('etag'), + if_tags_match_condition=if_tags_match_condition or blob.get('if_tags_match_condition'), + match_condition=blob.get('match_condition') or MatchConditions.IfNotModified if blob.get('etag') + else None, + timeout=blob.get('timeout'), + ) + except AttributeError: + options = BlobClient._generic_delete_blob_options( # pylint: disable=protected-access + delete_snapshots=delete_snapshots, + if_modified_since=if_modified_since, + if_unmodified_since=if_unmodified_since, + if_tags_match_condition=if_tags_match_condition + ) + + query_parameters, header_parameters = self._generate_delete_blobs_subrequest_options(**options) + + req = HttpRequest( + "DELETE", + "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), + headers=header_parameters + ) + req.format_parameters(query_parameters) + reqs.append(req) + + return reqs, kwargs + + @distributed_trace + def delete_blobs( + self, *blobs: Union[str, Dict[str, Any], BlobProperties], + **kwargs: Any + ) -> Iterator[HttpResponse]: + """Marks the specified blobs or snapshots for deletion. + + The blobs are later deleted during garbage collection. + Note that in order to delete blobs, you must delete all of their + snapshots. You can delete both at the same time with the delete_blobs operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots + and retains the blobs or snapshots for specified number of days. + After specified number of days, blobs' data is removed from the service during garbage collection. + Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` + Soft-deleted blobs or snapshots can be restored using :func:`~azure.storage.blob.BlobClient.undelete()` + + The maximum number of blobs that can be deleted in a single request is 256. + + :param blobs: + The blobs to delete. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. 
+
+ blob name:
+ key: 'name', value type: str
+ snapshot you want to delete:
+ key: 'snapshot', value type: str
+ version id:
+ key: 'version_id', value type: str
+ whether to delete snapshots when deleting blob:
+ key: 'delete_snapshots', value: 'include' or 'only'
+ whether the blob has been modified or not:
+ key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+ etag:
+ key: 'etag', value type: str
+ whether to match the etag or not:
+ key: 'match_condition', value type: MatchConditions
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+ :keyword str delete_snapshots:
+ Required if a blob has associated snapshots. Values include:
+ - "only": Deletes only the blob's snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool raise_on_any_failure:
+ A boolean param which defaults to True. When set, an exception
+ is raised even if there is a single operation failure.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_common.py
+ :start-after: [START delete_multiple_blobs]
+ :end-before: [END delete_multiple_blobs]
+ :language: python
+ :dedent: 8
+ :caption: Deleting multiple blobs.
+ """
+ if len(blobs) == 0:
+ return iter(list())
+
+ reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
+ def _generate_set_tiers_subrequest_options(
+ self, tier, snapshot=None, version_id=None, rehydrate_priority=None, lease_access_conditions=None, **kwargs
+ ):
+ """This code is a copy from _generated.
+
+ Once Autorest is able to provide request preparation this code should be removed.
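+
+ Returns the (query_parameters, header_parameters) pair for a single
+ Set Blob Tier subrequest within the batch request.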
+ """ + if not tier: + raise ValueError("A blob tier must be specified") + if snapshot and version_id: + raise ValueError("Snapshot and version_id cannot be set at the same time") + if_tags = kwargs.pop('if_tags', None) + + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + + comp = "tier" + timeout = kwargs.pop('timeout', None) + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._client._serialize.query("snapshot", snapshot, 'str') # pylint: disable=protected-access + if version_id is not None: + query_parameters['versionid'] = self._client._serialize.query("version_id", version_id, 'str') # pylint: disable=protected-access + if timeout is not None: + query_parameters['timeout'] = self._client._serialize.query("timeout", timeout, 'int', minimum=0) # pylint: disable=protected-access + query_parameters['comp'] = self._client._serialize.query("comp", comp, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + + # Construct headers + header_parameters = {} + header_parameters['x-ms-access-tier'] = self._client._serialize.header("tier", tier, 'str') # pylint: disable=protected-access, specify-parameter-names-in-call + if rehydrate_priority is not None: + header_parameters['x-ms-rehydrate-priority'] = self._client._serialize.header( # pylint: disable=protected-access + "rehydrate_priority", rehydrate_priority, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._client._serialize.header("lease_id", lease_id, 'str') # pylint: disable=protected-access + if if_tags is not None: + header_parameters['x-ms-if-tags'] = self._client._serialize.header("if_tags", if_tags, 'str') # pylint: disable=protected-access + + return query_parameters, header_parameters + + def _generate_set_tiers_options( + self, blob_tier: Optional[Union[str, 'StandardBlobTier', 'PremiumPageBlobTier']], + *blobs: Union[str, Dict[str, Any], BlobProperties], + **kwargs: Any + ): + timeout = kwargs.pop('timeout', None) + raise_on_any_failure = kwargs.pop('raise_on_any_failure', True) + rehydrate_priority = kwargs.pop('rehydrate_priority', None) + if_tags = kwargs.pop('if_tags_match_condition', None) + kwargs.update({'raise_on_any_failure': raise_on_any_failure, + 'sas': self._query_str.replace('?', '&'), + 'timeout': '&timeout=' + str(timeout) if timeout else "", + 'path': self.container_name, + 'restype': 'restype=container&' + }) + + reqs = [] + for blob in blobs: + blob_name = _get_blob_name(blob) + container_name = self.container_name + + try: + tier = blob_tier or blob.get('blob_tier') + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + tier=tier, + snapshot=blob.get('snapshot'), + version_id=blob.get('version_id'), + rehydrate_priority=rehydrate_priority or blob.get('rehydrate_priority'), + lease_access_conditions=blob.get('lease_id'), + if_tags=if_tags or blob.get('if_tags_match_condition'), + timeout=timeout or blob.get('timeout') + ) + except AttributeError: + query_parameters, header_parameters = self._generate_set_tiers_subrequest_options( + blob_tier, rehydrate_priority=rehydrate_priority, if_tags=if_tags) + + req = HttpRequest( + "PUT", + "/{}/{}{}".format(quote(container_name), quote(blob_name, safe='/~'), self._query_str), + headers=header_parameters + ) + req.format_parameters(query_parameters) + reqs.append(req) + + return reqs, kwargs + + @distributed_trace + def set_standard_blob_tier_blobs( + self, 
standard_blob_tier: Optional[Union[str, 'StandardBlobTier']],
+ *blobs: Union[str, Dict[str, Any], BlobProperties],
+ **kwargs: Any
+ ) -> Iterator[HttpResponse]:
+ """This operation sets the tier on block blobs.
+
+ A block blob's tier determines Hot/Cool/Archive storage type.
+ This operation does not update the blob's ETag.
+
+ The maximum number of blobs that can be updated in a single request is 256.
+
+ :param standard_blob_tier:
+ Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+ 'Archive'. The hot tier is optimized for storing data that is accessed
+ frequently. The cool storage tier is optimized for storing data that
+ is infrequently accessed and stored for at least a month. The archive
+ tier is optimized for storing data that is rarely accessed and stored
+ for at least six months with flexible latency requirements.
+
+ .. note::
+ To set a different tier on each blob, set this positional parameter
+ to None; the blob tier of each BlobProperties will then be used.
+
+ :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the supported keys and value rules are:
+
+ blob name:
+ key: 'name', value type: str
+ standard blob tier:
+ key: 'blob_tier', value type: StandardBlobTier
+ rehydrate priority:
+ key: 'rehydrate_priority', value type: RehydratePriority
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ snapshot:
+ key: "snapshot", value type: str
+ version id:
+ key: "version_id", value type: str
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+ :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+ Indicates the priority with which to rehydrate an archived blob.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :keyword bool raise_on_any_failure:
+ A boolean param which defaults to True. When set, an exception
+ is raised even if there is a single operation failure.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(standard_blob_tier, *blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
+ @distributed_trace
+ def set_premium_page_blob_tier_blobs(
+ self, premium_page_blob_tier: Optional[Union[str, 'PremiumPageBlobTier']],
+ *blobs: Union[str, Dict[str, Any], BlobProperties],
+ **kwargs: Any
+ ) -> Iterator[HttpResponse]:
+ """Sets the page blob tiers on all blobs. This API is only supported for page blobs on premium accounts.
+
+ The maximum number of blobs that can be updated in a single request is 256.
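+ Larger sets must be split across multiple calls.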
+
+ :param premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+
+ .. note::
+ To set a different tier on each blob, set this positional parameter
+ to None; the blob tier of each BlobProperties will then be used.
+
+ :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+ :param blobs:
+ The blobs with which to interact. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When a blob is passed as a dict, the supported keys and value rules are:
+
+ blob name:
+ key: 'name', value type: str
+ premium blob tier:
+ key: 'blob_tier', value type: PremiumPageBlobTier
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :keyword bool raise_on_any_failure:
+ A boolean param which defaults to True. When set, an exception
+ is raised even if there is a single operation failure.
+ :return: An iterator of responses, one for each blob in order
+ :rtype: Iterator[~azure.core.pipeline.transport.HttpResponse]
+ """
+ reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs)
+
+ return self._batch_send(*reqs, **options)
+
+ def get_blob_client(
+ self, blob, # type: Union[str, BlobProperties]
+ snapshot=None # type: str
+ ):
+ # type: (...) -> BlobClient
+ """Get a client to interact with the specified blob.
+
+ The blob need not already exist.
+
+ :param blob:
+ The blob with which to interact.
+ :type blob: str or ~azure.storage.blob.BlobProperties
+ :param str snapshot:
+ The optional blob snapshot on which to operate. This can be the snapshot ID string
+ or the response returned from :func:`~BlobClient.create_snapshot()`.
+ :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.BlobClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers.py
+ :start-after: [START get_blob_client]
+ :end-before: [END get_blob_client]
+ :language: python
+ :dedent: 8
+ :caption: Get the blob client.
+ """ + blob_name = _get_blob_name(blob) + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_deserialize.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_deserialize.py new file mode 100644 index 00000000000..29a72a058b3 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_deserialize.py @@ -0,0 +1,211 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from typing import ( + Dict, List, Optional, Tuple, Union, + TYPE_CHECKING +) +from urllib.parse import unquote +from xml.etree.ElementTree import Element + +from ._models import ( + BlobAnalyticsLogging, + BlobProperties, + BlobType, + ContainerProperties, + ContentSettings, + CopyProperties, + CorsRule, + ImmutabilityPolicy, + LeaseProperties, + Metrics, + ObjectReplicationPolicy, + ObjectReplicationRule, + RetentionPolicy, + StaticWebsite, +) +from ._shared.models import get_enum_value +from ._shared.response_handlers import deserialize_metadata + +if TYPE_CHECKING: + from ._generated.models import BlobTag, PageList + + +def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers): + try: + deserialized_response = response.http_response + except AttributeError: + deserialized_response = response + return cls_method(deserialized_response, obj, headers) + + +def deserialize_blob_properties(response, obj, headers): + blob_properties = BlobProperties( + metadata=deserialize_metadata(response, obj, headers), + object_replication_source_properties=deserialize_ors_policies(response.http_response.headers), + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-blob-content-md5' in headers: + blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] + else: + blob_properties.content_settings.content_md5 = None + return blob_properties + + +def deserialize_ors_policies(policy_dictionary): + + if policy_dictionary is None: + return None + # For source blobs (blobs that have policy ids and rule ids applied to them), + # the header will be formatted as "x-ms-or-_: {Complete, Failed}". + # The value of this header is the status of the replication. 
+ or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
+ if 'or-' in key and key != 'x-ms-or-policy-id'}
+
+ parsed_result = {}
+
+ for key, val in or_policy_status_headers.items():
+ # list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
+ policy_and_rule_ids = key.split('or-')[1].split('_')
+ policy_id = policy_and_rule_ids[0]
+ rule_id = policy_and_rule_ids[1]
+
+ # If we are seeing this policy for the first time, create a new list to store rule_id -> result
+ parsed_result[policy_id] = parsed_result.get(policy_id) or list()
+ parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
+
+ result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
+
+ return result_list
+
+
+def deserialize_blob_stream(response, obj, headers):
+ blob_properties = deserialize_blob_properties(response, obj, headers)
+ obj.properties = blob_properties
+ return response.http_response.location_mode, obj
+
+
+def deserialize_container_properties(response, obj, headers):
+ metadata = deserialize_metadata(response, obj, headers)
+ container_properties = ContainerProperties(
+ metadata=metadata,
+ **headers
+ )
+ return container_properties
+
+
+def get_page_ranges_result(ranges):
+ # type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+ page_range = [] # type: ignore
+ clear_range = [] # type: List
+ if ranges.page_range:
+ page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
+ if ranges.clear_range:
+ clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
+ return page_range, clear_range # type: ignore
+
+
+def service_stats_deserialize(generated):
+ """Deserialize a ServiceStats object into a dict.
+ """
+ return {
+ 'geo_replication': {
+ 'status': generated.geo_replication.status,
+ 'last_sync_time': generated.geo_replication.last_sync_time,
+ }
+ }
+
+
+def service_properties_deserialize(generated):
+ """Deserialize a ServiceProperties object into a dict.
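+
+ The keys mirror the keyword arguments accepted by
+ BlobServiceClient.set_service_properties.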
+ """ + return { + 'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access + 'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access + 'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access + 'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access + 'target_version': generated.default_service_version, # pylint: disable=protected-access + 'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access + 'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access + } + + +def get_blob_properties_from_generated_code(generated): + blob = BlobProperties() + if generated.name.encoded: + blob.name = unquote(generated.name.content) + else: + blob.name = generated.name.content + blob_type = get_enum_value(generated.properties.blob_type) + blob.blob_type = BlobType(blob_type) if blob_type else None + blob.etag = generated.properties.etag + blob.deleted = generated.deleted + blob.snapshot = generated.snapshot + blob.is_append_blob_sealed = generated.properties.is_sealed + blob.metadata = generated.metadata.additional_properties if generated.metadata else {} + blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None + blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access + blob.last_modified = generated.properties.last_modified + blob.creation_time = generated.properties.creation_time + blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access + blob.size = generated.properties.content_length + blob.page_blob_sequence_number = generated.properties.blob_sequence_number + blob.server_encrypted = generated.properties.server_encrypted + blob.encryption_scope = generated.properties.encryption_scope + blob.deleted_time = generated.properties.deleted_time + blob.remaining_retention_days = generated.properties.remaining_retention_days + blob.blob_tier = generated.properties.access_tier + blob.rehydrate_priority = generated.properties.rehydrate_priority + blob.blob_tier_inferred = generated.properties.access_tier_inferred + blob.archive_status = generated.properties.archive_status + blob.blob_tier_change_time = generated.properties.access_tier_change_time + blob.version_id = generated.version_id + blob.is_current_version = generated.is_current_version + blob.tag_count = generated.properties.tag_count + blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access + blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata) + blob.last_accessed_on = generated.properties.last_accessed_on + blob.immutability_policy = ImmutabilityPolicy._from_generated(generated) # pylint: disable=protected-access + blob.has_legal_hold = generated.properties.legal_hold + blob.has_versions_only = generated.has_versions_only + return blob + + +def parse_tags(generated_tags): + # type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None] + """Deserialize a list of BlobTag objects into a dict. 
+ """ + if generated_tags: + tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set} + return tag_dict + return None + + +def load_single_xml_node(element: Element, name: str) -> Union[Element, None]: + return element.find(name) + + +def load_many_xml_nodes(element: Element, name: str, wrapper: Element = None) -> List[Union[Element, None]]: + if wrapper: + element = load_single_xml_node(element, wrapper) + return list(element.findall(name)) + + +def load_xml_string(element: Element, name: str) -> str: + node = element.find(name) + if node is None or not node.text: + return None + return node.text + + +def load_xml_int(element: Element, name: str) -> int: + node = element.find(name) + if node is None or not node.text: + return None + return int(node.text) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_download.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_download.py new file mode 100644 index 00000000000..e7b77526a26 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_download.py @@ -0,0 +1,797 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +import threading +import time +import warnings +from io import BytesIO +from typing import Generic, IO, Iterator, Optional, TypeVar + +from azure.core.exceptions import DecodeError, HttpResponseError, IncompleteReadError +from azure.core.tracing.common import with_current_context + +from ._shared.request_handlers import validate_and_format_range_headers +from ._shared.response_handlers import process_storage_error, parse_length_from_content_range +from ._deserialize import deserialize_blob_properties, get_page_ranges_result +from ._encryption import ( + adjust_blob_size_for_encryption, + decrypt_blob, + get_adjusted_download_range_and_offset, + is_encryption_v2, + parse_encryption_data +) + +T = TypeVar('T', bytes, str) + + +def process_range_and_offset(start_range, end_range, length, encryption_options, encryption_data): + start_offset, end_offset = 0, 0 + if encryption_options.get("key") is not None or encryption_options.get("resolver") is not None: + return get_adjusted_download_range_and_offset( + start_range, + end_range, + length, + encryption_data) + + return (start_range, end_range), (start_offset, end_offset) + + +def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + + content = b"".join(list(data)) + + if content and encryption.get("key") is not None or encryption.get("resolver") is not None: + try: + return decrypt_blob( + encryption.get("required"), + encryption.get("key"), + encryption.get("resolver"), + content, + start_offset, + end_offset, + data.response.headers, + ) + except Exception as error: + raise HttpResponseError(message="Decryption failed.", response=data.response, error=error) + return content + + +class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + client=None, + non_empty_ranges=None, + total_size=None, + chunk_size=None, + current_progress=None, + start_range=None, + end_range=None, + stream=None, + parallel=None, + validate_content=None, + encryption_options=None, + encryption_data=None, + progress_hook=None, 
+ **kwargs + ): + self.client = client + self.non_empty_ranges = non_empty_ranges + + # Information on the download range/chunk size + self.chunk_size = chunk_size + self.total_size = total_size + self.start_index = start_range + self.end_index = end_range + + # The destination that we will write to + self.stream = stream + self.stream_lock = threading.Lock() if parallel else None + self.progress_lock = threading.Lock() if parallel else None + self.progress_hook = progress_hook + + # For a parallel download, the stream is always seekable, so we note down the current position + # in order to seek to the right place when out-of-order chunks come in + self.stream_start = stream.tell() if parallel else None + + # Download progress so far + self.progress_total = current_progress + + # Encryption + self.encryption_options = encryption_options + self.encryption_data = encryption_data + + # Parameters for each get operation + self.validate_content = validate_content + self.request_options = kwargs + + def _calculate_range(self, chunk_start): + if chunk_start + self.chunk_size > self.end_index: + chunk_end = self.end_index + else: + chunk_end = chunk_start + self.chunk_size + return chunk_start, chunk_end + + def get_chunk_offsets(self): + index = self.start_index + while index < self.end_index: + yield index + index += self.chunk_size + + def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + self._write_to_stream(chunk_data, chunk_start) + self._update_progress(length) + + def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return self._download_chunk(chunk_start, chunk_end - 1) + + def _update_progress(self, length): + if self.progress_lock: + with self.progress_lock: # pylint: disable=not-context-manager + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + self.progress_hook(self.progress_total, self.total_size) + + def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + with self.stream_lock: # pylint: disable=not-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + def _do_optimize(self, given_range_start, given_range_end): + # If we have no page range list stored, then assume there's data everywhere for that page blob + # or it's a block blob or append blob + if self.non_empty_ranges is None: + return False + + for source_range in self.non_empty_ranges: + # Case 1: As the range list is sorted, if we've reached such a source_range + # we've checked all the appropriate source_range already and haven't found any overlapping. + # so the given range doesn't have any data and download optimization could be applied. + # given range: | | + # source range: | | + if given_range_end < source_range['start']: # pylint:disable=no-else-return + return True + # Case 2: the given range comes after source_range, continue checking. + # given range: | | + # source range: | | + elif source_range['end'] < given_range_start: + pass + # Case 3: source_range and given range overlap somehow, no need to optimize. + else: + return False + # Went through all src_ranges, but nothing overlapped. Optimization will be applied. 
+ return True
+
+ def _download_chunk(self, chunk_start, chunk_end):
+ download_range, offset = process_range_and_offset(
+ chunk_start, chunk_end, chunk_end, self.encryption_options, self.encryption_data
+ )
+
+ # No need to download the empty chunk from server if there's no data in the chunk to be downloaded.
+ # Do optimize and create empty chunk locally if condition is met.
+ if self._do_optimize(download_range[0], download_range[1]):
+ chunk_data = b"\x00" * self.chunk_size
+ else:
+ range_header, range_validation = validate_and_format_range_headers(
+ download_range[0],
+ download_range[1],
+ check_content_md5=self.validate_content
+ )
+
+ retry_active = True
+ retry_total = 3
+ while retry_active:
+ try:
+ _, response = self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ try:
+ chunk_data = process_content(response, offset[0], offset[1], self.encryption_options)
+ retry_active = False
+ except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+ retry_total -= 1
+ if retry_total <= 0:
+ raise HttpResponseError(error, error=error)
+ time.sleep(1)
+
+ # This makes sure that if_match is set so that we can validate
+ # that subsequent downloads are to an unmodified blob
+ if self.request_options.get("modified_access_conditions"):
+ self.request_options["modified_access_conditions"].if_match = response.properties.etag
+
+ return chunk_data
+
+
+class _ChunkIterator(object):
+ """Iterator for chunks in the blob download stream."""
+
+ def __init__(self, size, content, downloader, chunk_size):
+ self.size = size
+ self._chunk_size = chunk_size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopIteration("Download complete")
+ if not self._iter_downloader:
+ # cut the data obtained from initial GET into chunks
+ if len(self._current_content) > self._chunk_size:
+ return self._get_chunk_data()
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+ # initial GET result still has more than _chunk_size bytes of data
+ if len(self._current_content) >= self._chunk_size:
+ return self._get_chunk_data()
+
+ try:
+ chunk = next(self._iter_chunks)
+ self._current_content += self._iter_downloader.yield_chunk(chunk)
+ except StopIteration as e:
+ self._complete = True
+ if self._current_content:
+ return self._current_content
+ raise e
+
+ # the current content from the first get is still there but smaller than chunk size
+ # therefore we want to make sure it's also included
+ return self._get_chunk_data()
+
+ next = __next__ # Python 2 compatibility.
+
+ def _get_chunk_data(self):
+ chunk_data = self._current_content[: self._chunk_size]
+ self._current_content = self._current_content[self._chunk_size:]
+ return chunk_data
+
+
+class StorageStreamDownloader(Generic[T]): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the blob being downloaded.
+ :ivar str container: + The name of the container where the blob is. + :ivar ~azure.storage.blob.BlobProperties properties: + The properties of the blob being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if specified, + otherwise the total size of the blob. + """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + download_cls=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._progress_hook = kwargs.pop('progress_hook', None) + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + self._encryption_data = None + self._offset = 0 + + # The cls is passed in via download_cls to avoid conflicting arg name with Generic.__new__ + # but needs to be changed to cls in the request options. + self._request_options['cls'] = download_cls + + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + self._get_encryption_data_request() + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. 
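+ # (max_single_get_size and max_chunk_get_size below come from the client
+ # configuration passed in as self._config.)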
+ self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, + initial_request_end, + self._end_range, + self._encryption_options, + self._encryption_data + ) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + def __len__(self): + return self.size + + def _get_encryption_data_request(self): + # Save current request cls + download_cls = self._request_options.pop('cls', None) + # Adjust cls for get_properties + self._request_options['cls'] = deserialize_blob_properties + + properties = self._clients.blob.get_properties(**self._request_options) + # This will return None if there is no encryption metadata or there are parsing errors. + # That is acceptable here, the proper error will be caught and surfaced when attempting + # to decrypt the blob. + self._encryption_data = parse_encryption_data(properties.metadata) + + # Restore cls for download + self._request_options['cls'] = download_cls + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + retry_active = True + retry_total = 3 + while retry_active: + try: + location_mode, response = self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+ self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._file_size is None: + raise ValueError("Required Content-Range response header is missing or malformed.") + # Remove any extra encryption data size from blob size + self._file_size = adjust_blob_size_for_encryption(self._file_size, self._encryption_data) + + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = self._clients.blob.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + try: + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content( + response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + retry_active = False + except (IncompleteReadError, HttpResponseError, DecodeError) as error: + retry_total -= 1 + if retry_total <= 0: + raise HttpResponseError(error, error=error) + time.sleep(1) + + # get page ranges to optimize downloading sparse page blob + if response.properties.blob_type == 'PageBlob': + try: + page_ranges = self._clients.page_blob.get_page_ranges() + self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] + # according to the REST API documentation: + # in a highly fragmented page blob with a large number of writes, + # a Get Page Ranges request can fail due to an internal server timeout. + # thus, if the page blob is not sparse, it's ok for it to fail + except HttpResponseError: + pass + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + # For encryption V2, calculate based on size of decrypted content, not download size. 
+ if is_encryption_v2(self._encryption_data): + self._download_complete = len(self._current_content) >= self.size + else: + self._download_complete = response.properties.size >= self.size + + if not self._download_complete and self._request_options.get("modified_access_conditions"): + self._request_options["modified_access_conditions"].if_match = response.properties.etag + + return response + + def _get_downloader_start_with_offset(self): + # Start where the initial request download ended + start = self._initial_range[1] + 1 + # For encryption V2 only, adjust start to the end of the fetched data rather than download size + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + start = (self._start_range or 0) + len(self._current_content) + + # Adjust the start based on any data read past the current content + start += (self._offset - len(self._current_content)) + return start + + def chunks(self): + # type: () -> Iterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: Iterator[bytes] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world.py + :start-after: [START download_a_blob_in_chunk] + :end-before: [END download_a_blob_in_chunk] + :language: python + :dedent: 12 + :caption: Download a blob using chunks(). + """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + data_start = self._initial_range[1] + 1 # Start where the first download ended + # For encryption, adjust start to the end of the fetched data rather than download size + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + data_start = (self._start_range or 0) + len(self._current_content) + + iter_downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=data_start, + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + encryption_data=self._encryption_data, + use_location=self._location_mode, + **self._request_options + ) + return _ChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + def read(self, size: Optional[int] = -1) -> T: + """ + Read up to size bytes from the stream and return them. If size + is unspecified or is -1, all bytes will be read. + + :param size: + The number of bytes to download from the stream. Leave unspecified + or set to -1 to download all bytes. + :returns: + The requested data as bytes or a string if encoding was specified. If + the return value is empty, there is no more data to read. 
+ :rtype: T
+ """
+ if size == -1:
+ return self.readall()
+ # Empty blob or already read to the end
+ if size == 0 or self._offset >= self.size:
+ return b'' if not self._encoding else ''
+
+ stream = BytesIO()
+ remaining_size = size
+
+ # Start by reading from current_content if there is data left
+ if self._offset < len(self._current_content):
+ start = self._offset
+ length = min(remaining_size, len(self._current_content) - self._offset)
+ read = stream.write(self._current_content[start:start + length])
+
+ remaining_size -= read
+ self._offset += read
+ if self._progress_hook:
+ self._progress_hook(self._offset, self.size)
+
+ if remaining_size > 0:
+ start_range = self._get_downloader_start_with_offset()
+
+ # End is the min between the remaining size, the file size, and the end of the specified range
+ end_range = min(start_range + remaining_size, self._file_size)
+ if self._end_range is not None:
+ end_range = min(end_range, self._end_range + 1)
+
+ parallel = self._max_concurrency > 1
+ downloader = _ChunkDownloader(
+ client=self._clients.blob,
+ non_empty_ranges=self._non_empty_ranges,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._offset,
+ start_range=start_range,
+ end_range=end_range,
+ stream=stream,
+ parallel=parallel,
+ validate_content=self._validate_content,
+ encryption_options=self._encryption_options,
+ encryption_data=self._encryption_data,
+ use_location=self._location_mode,
+ progress_hook=self._progress_hook,
+ **self._request_options
+ )
+
+ if parallel and remaining_size > self._config.max_chunk_get_size:
+ import concurrent.futures
+ with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+ list(executor.map(
+ with_current_context(downloader.process_chunk),
+ downloader.get_chunk_offsets()
+ ))
+ else:
+ for chunk in downloader.get_chunk_offsets():
+ downloader.process_chunk(chunk)
+
+ self._offset += remaining_size
+
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ def readall(self) -> T:
+ """
+ Read the entire contents of this blob.
+ This operation is blocking until all data is downloaded.
+
+ :returns: The requested data as bytes or a string if encoding was specified.
+ :rtype: T
+ """
+ stream = BytesIO()
+ self.readinto(stream)
+ data = stream.getvalue()
+ if self._encoding:
+ return data.decode(self._encoding)
+ return data
+
+ def content_as_bytes(self, max_concurrency=1):
+ """DEPRECATED: Download the contents of this file.
+
+ This operation is blocking until all data is downloaded.
+
+ This method is deprecated, use :func:`readall` instead.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :rtype: bytes
+ """
+ warnings.warn(
+ "content_as_bytes is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ return self.readall()
+
+ def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+ """DEPRECATED: Download the contents of this blob, and decode as text.
+
+ This operation is blocking until all data is downloaded.
+
+ This method is deprecated, use :func:`readall` instead.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+ Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return self.readall() + + def readinto(self, stream: IO[T]) -> int: + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # The stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." + if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # If some data has been streamed using `read`, only stream the remaining data + remaining_size = self.size - self._offset + # Already read to the end + if remaining_size <= 0: + return 0 + + # Write the content to the user stream if there is data left + if self._offset < len(self._current_content): + content = self._current_content[self._offset:] + stream.write(content) + self._offset += len(content) + if self._progress_hook: + self._progress_hook(len(content), self.size) + + if self._download_complete: + return remaining_size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + data_start = self._get_downloader_start_with_offset() + + downloader = _ChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._offset, + start_range=data_start, + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + encryption_data=self._encryption_data, + use_location=self._location_mode, + progress_hook=self._progress_hook, + **self._request_options + ) + if parallel: + import concurrent.futures + with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor: + list(executor.map( + with_current_context(downloader.process_chunk), + downloader.get_chunk_offsets() + )) + else: + for chunk in downloader.get_chunk_offsets(): + downloader.process_chunk(chunk) + + return remaining_size + + def download_to_stream(self, stream, max_concurrency=1): + """DEPRECATED: Download the contents of this blob to a stream. + + This method is deprecated, use func:`readinto` instead. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded blob. 
+ :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self.readinto(stream) + return self.properties diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_encryption.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_encryption.py new file mode 100644 index 00000000000..c821e2eccb7 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_encryption.py @@ -0,0 +1,979 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os +import math +import sys +import warnings +from collections import OrderedDict +from io import BytesIO +from json import ( + dumps, + loads, +) +from typing import Any, BinaryIO, Dict, Optional, Tuple + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.ciphers import Cipher +from cryptography.hazmat.primitives.ciphers.aead import AESGCM +from cryptography.hazmat.primitives.ciphers.algorithms import AES +from cryptography.hazmat.primitives.ciphers.modes import CBC +from cryptography.hazmat.primitives.padding import PKCS7 + +from azure.core.exceptions import HttpResponseError + +from ._version import VERSION +from ._shared import encode_base64, decode_base64_to_bytes + + +_ENCRYPTION_PROTOCOL_V1 = '1.0' +_ENCRYPTION_PROTOCOL_V2 = '2.0' +_GCM_REGION_DATA_LENGTH = 4 * 1024 * 1024 +_GCM_NONCE_LENGTH = 12 +_GCM_TAG_LENGTH = 16 + +_ERROR_OBJECT_INVALID = \ + '{0} does not define a complete interface. Value of {1} is either missing or invalid.' + + +def _validate_not_none(param_name, param): + if param is None: + raise ValueError('{0} should not be None.'.format(param_name)) + + +def _validate_key_encryption_key_wrap(kek): + # Note that None is not callable and so will fail the second clause of each check. + if not hasattr(kek, 'wrap_key') or not callable(kek.wrap_key): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'wrap_key')) + if not hasattr(kek, 'get_kid') or not callable(kek.get_kid): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid')) + if not hasattr(kek, 'get_key_wrap_algorithm') or not callable(kek.get_key_wrap_algorithm): + raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_key_wrap_algorithm')) + + +class StorageEncryptionMixin(object): + def _configure_encryption(self, kwargs): + self.require_encryption = kwargs.get("require_encryption", False) + self.encryption_version = kwargs.get("encryption_version", "1.0") + self.key_encryption_key = kwargs.get("key_encryption_key") + self.key_resolver_function = kwargs.get("key_resolver_function") + if self.key_encryption_key and self.encryption_version == '1.0': + warnings.warn("This client has been configured to use encryption with version 1.0. " + + "Version 1.0 is deprecated and no longer considered secure. It is highly " + + "recommended that you switch to using version 2.0. The version can be " + + "specified using the 'encryption_version' keyword.") + + +class _EncryptionAlgorithm(object): + ''' + Specifies which client encryption algorithm is used. 
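+
+ AES_CBC_256 is used by encryption protocol version 1.0 and AES_GCM_256
+ by version 2.0.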
+ '''
+ AES_CBC_256 = 'AES_CBC_256'
+ AES_GCM_256 = 'AES_GCM_256'
+
+
+class _WrappedContentKey:
+ '''
+ Represents the envelope key details stored on the service.
+ '''
+
+ def __init__(self, algorithm, encrypted_key, key_id):
+ '''
+ :param str algorithm:
+ The algorithm used for wrapping.
+ :param bytes encrypted_key:
+ The encrypted content-encryption-key.
+ :param str key_id:
+ The key-encryption-key identifier string.
+ '''
+
+ _validate_not_none('algorithm', algorithm)
+ _validate_not_none('encrypted_key', encrypted_key)
+ _validate_not_none('key_id', key_id)
+
+ self.algorithm = algorithm
+ self.encrypted_key = encrypted_key
+ self.key_id = key_id
+
+
+class _EncryptedRegionInfo:
+ '''
+ Represents the length of encryption elements.
+ This is only used for Encryption V2.
+ '''
+
+ def __init__(self, data_length, nonce_length, tag_length):
+ '''
+ :param int data_length:
+ The length of the encryption region data (not including nonce + tag).
+ :param int nonce_length:
+ The length of the nonce used when encrypting.
+ :param int tag_length:
+ The length of the encryption tag.
+ '''
+ _validate_not_none('data_length', data_length)
+ _validate_not_none('nonce_length', nonce_length)
+ _validate_not_none('tag_length', tag_length)
+
+ self.data_length = data_length
+ self.nonce_length = nonce_length
+ self.tag_length = tag_length
+
+
+class _EncryptionAgent:
+ '''
+ Represents the encryption agent stored on the service.
+ It consists of the encryption protocol version and encryption algorithm used.
+ '''
+
+ def __init__(self, encryption_algorithm, protocol):
+ '''
+ :param _EncryptionAlgorithm encryption_algorithm:
+ The algorithm used for encrypting the message contents.
+ :param str protocol:
+ The protocol version used for encryption.
+ '''
+
+ _validate_not_none('encryption_algorithm', encryption_algorithm)
+ _validate_not_none('protocol', protocol)
+
+ self.encryption_algorithm = str(encryption_algorithm)
+ self.protocol = protocol
+
+
+class _EncryptionData:
+ '''
+ Represents the encryption data that is stored on the service.
+ '''
+
+ def __init__(
+ self,
+ content_encryption_IV,
+ encrypted_region_info,
+ encryption_agent,
+ wrapped_content_key,
+ key_wrapping_metadata):
+ '''
+ :param Optional[bytes] content_encryption_IV:
+ The content encryption initialization vector.
+ Required for AES-CBC (V1).
+ :param Optional[_EncryptedRegionInfo] encrypted_region_info:
+ The info about the authenticated block sizes.
+ Required for AES-GCM (V2).
+ :param _EncryptionAgent encryption_agent:
+ The encryption agent.
+ :param _WrappedContentKey wrapped_content_key:
+ An object that stores the wrapping algorithm, the key identifier,
+ and the encrypted key bytes.
+ :param dict key_wrapping_metadata:
+ A dict containing metadata related to the key wrapping.
+ '''
+
+ _validate_not_none('encryption_agent', encryption_agent)
+ _validate_not_none('wrapped_content_key', wrapped_content_key)
+
+ # Validate we have the right matching optional parameter for the specified algorithm
+ if encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_CBC_256:
+ _validate_not_none('content_encryption_IV', content_encryption_IV)
+ elif encryption_agent.encryption_algorithm == _EncryptionAlgorithm.AES_GCM_256:
+ _validate_not_none('encrypted_region_info', encrypted_region_info)
+ else:
+ raise ValueError("Invalid encryption algorithm.")
+
+ self.content_encryption_IV = content_encryption_IV
+ self.encrypted_region_info = encrypted_region_info
+ self.encryption_agent = encryption_agent
+ self.wrapped_content_key = wrapped_content_key
+ self.key_wrapping_metadata = key_wrapping_metadata
+
+
+class GCMBlobEncryptionStream:
+ """
+ A stream that performs AES-GCM encryption on the given data as
+ it's streamed. Data is read and encrypted in regions. The stream
+ will use the same encryption key and will generate a guaranteed unique
+ nonce for each encryption region.
+ """
+ def __init__(
+ self,
+ content_encryption_key: bytes,
+ data_stream: BinaryIO,
+ ):
+ """
+ :param bytes content_encryption_key: The encryption key to use.
+ :param BinaryIO data_stream: The data stream to read data from.
+ """
+ self.content_encryption_key = content_encryption_key
+ self.data_stream = data_stream
+
+ self.offset = 0
+ self.current = b''
+ self.nonce_counter = 0
+
+ def read(self, size: int = -1) -> bytes:
+ """
+ Read data from the stream. Specify -1 to read all available data.
+
+ :param int size: The amount of data to read. Defaults to -1 for all data.
+ """
+ result = BytesIO()
+ remaining = sys.maxsize if size == -1 else size
+
+ while remaining > 0:
+ # Start by reading from current
+ if len(self.current) > 0:
+ read = min(remaining, len(self.current))
+ result.write(self.current[:read])
+
+ self.current = self.current[read:]
+ self.offset += read
+ remaining -= read
+
+ if remaining > 0:
+ # Read one region of data and encrypt it
+ data = self.data_stream.read(_GCM_REGION_DATA_LENGTH)
+ if len(data) == 0:
+ # No more data to read
+ break
+
+ self.current = self._encrypt_region(data)
+
+ return result.getvalue()
+
+ def _encrypt_region(self, data: bytes) -> bytes:
+ """
+ Encrypt the given region of data using AES-GCM. The result
+ includes the data in the form: nonce + ciphertext + tag.
+
+ :param bytes data: The data to encrypt.
+ """
+ # Each region MUST use a different nonce
+ nonce = self.nonce_counter.to_bytes(_GCM_NONCE_LENGTH, 'big')
+ self.nonce_counter += 1
+
+ aesgcm = AESGCM(self.content_encryption_key)
+
+ # Returns ciphertext + tag
+ ciphertext_with_tag = aesgcm.encrypt(nonce, data, None)
+ return nonce + ciphertext_with_tag
+
+
+def is_encryption_v2(encryption_data: Optional[_EncryptionData]) -> bool:
+ """
+ Determine whether the given encryption data signifies version 2.0.
+
+ :param Optional[_EncryptionData] encryption_data: The encryption data. Will return False if this is None.
+ """
+ # If encryption_data is None, assume no encryption
+ return encryption_data is not None and encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2
+
+
+def get_adjusted_upload_size(length: int, encryption_version: str) -> int:
+ """
+ Get the adjusted size of the blob upload which accounts for
+ extra encryption data (padding OR nonce + tag).
+
+ :param int length: The plaintext data length.
+ :param str encryption_version: The version of encryption being used.
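+
+ For example, under V2 a 5 MiB upload spans two 4 MiB regions, so the
+ adjusted size is the original length plus 2 * (12 + 16) bytes of nonce
+ and tag overhead.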
+ """ + if encryption_version == _ENCRYPTION_PROTOCOL_V1: + return length + (16 - (length % 16)) + + if encryption_version == _ENCRYPTION_PROTOCOL_V2: + encryption_data_length = _GCM_NONCE_LENGTH + _GCM_TAG_LENGTH + regions = math.ceil(length / _GCM_REGION_DATA_LENGTH) + return length + (regions * encryption_data_length) + + raise ValueError("Invalid encryption version specified.") + + +def get_adjusted_download_range_and_offset( + start: int, + end: int, + length: int, + encryption_data: Optional[_EncryptionData]) -> Tuple[Tuple[int, int], Tuple[int, int]]: + """ + Gets the new download range and offsets into the decrypted data for + the given user-specified range. The new download range will include all + the data needed to decrypt the user-provided range and will include only + full encryption regions. + + The offsets returned will be the offsets needed to fetch the user-requested + data out of the full decrypted data. The end offset is different based on the + encryption version. For V1, the end offset is offset from the end whereas for + V2, the end offset is the ending index into the stream. + V1: decrypted_data[start_offset : len(decrypted_data) - end_offset] + V2: decrypted_data[start_offset : end_offset] + + :param int start: The user-requested start index. + :param int end: The user-requested end index. + :param int length: The user-requested length. Only used for V1. + :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes. + :return: (new start, new end), (start offset, end offset) + """ + start_offset, end_offset = 0, 0 + if encryption_data is None: + return (start, end), (start_offset, end_offset) + + if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1: + if start is not None: + # Align the start of the range along a 16 byte block + start_offset = start % 16 + start -= start_offset + + # Include an extra 16 bytes for the IV if necessary + # Because of the previous offsetting, start_range will always + # be a multiple of 16. + if start > 0: + start_offset += 16 + start -= 16 + + if length is not None: + # Align the end of the range along a 16 byte block + end_offset = 15 - (end % 16) + end += end_offset + + elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: + start_offset, end_offset = 0, end + + nonce_length = encryption_data.encrypted_region_info.nonce_length + data_length = encryption_data.encrypted_region_info.data_length + tag_length = encryption_data.encrypted_region_info.tag_length + region_length = nonce_length + data_length + tag_length + requested_length = end - start + + if start is not None: + # Find which data region the start is in + region_num = start // data_length + # The start of the data region is different from the start of the encryption region + data_start = region_num * data_length + region_start = region_num * region_length + # Offset is based on data region + start_offset = start - data_start + # New start is the start of the encryption region + start = region_start + + if end is not None: + # Find which data region the end is in + region_num = end // data_length + end_offset = start_offset + requested_length + 1 + # New end is the end of the encryption region + end = (region_num * region_length) + region_length - 1 + + return (start, end), (start_offset, end_offset) + + +def parse_encryption_data(metadata: Dict[str, Any]) -> Optional[_EncryptionData]: + """ + Parses the encryption data out of the given blob metadata. 
If metadata does
+    not exist or there are parsing errors, this function will just return None.
+
+    :param Dict[str, Any] metadata: The blob metadata parsed from the response.
+    """
+    try:
+        return _dict_to_encryption_data(loads(metadata['encryptiondata']))
+    except:  # pylint: disable=bare-except
+        return None
+
+
+def adjust_blob_size_for_encryption(size: int, encryption_data: Optional[_EncryptionData]) -> int:
+    """
+    Adjusts the given blob size for encryption by subtracting the size of
+    the encryption data (nonce + tag). This only has an effect for encryption V2.
+
+    :param int size: The original blob size.
+    :param Optional[_EncryptionData] encryption_data: The encryption data to determine version and sizes.
+    """
+    if is_encryption_v2(encryption_data):
+        nonce_length = encryption_data.encrypted_region_info.nonce_length
+        data_length = encryption_data.encrypted_region_info.data_length
+        tag_length = encryption_data.encrypted_region_info.tag_length
+        region_length = nonce_length + data_length + tag_length
+
+        num_regions = math.ceil(size / region_length)
+        metadata_size = num_regions * (nonce_length + tag_length)
+        return size - metadata_size
+
+    return size
+
+
+def _generate_encryption_data_dict(kek, cek, iv, version):
+    '''
+    Generates and returns the encryption metadata as a dict.
+
+    :param object kek: The key encryption key. See calling functions for more information.
+    :param bytes cek: The content encryption key.
+    :param Optional[bytes] iv: The initialization vector. Only required for AES-CBC.
+    :param str version: The client encryption version used.
+    :return: A dict containing all the encryption metadata.
+    :rtype: dict
+    '''
+    # Encrypt the cek.
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        wrapped_cek = kek.wrap_key(cek)
+    # For V2, we include the encryption version in the wrapped key.
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        # We must pad the version to 8 bytes for AES Keywrap algorithms
+        to_wrap = _ENCRYPTION_PROTOCOL_V2.encode().ljust(8, b'\0') + cek
+        wrapped_cek = kek.wrap_key(to_wrap)
+
+    # Build the encryption_data dict.
+    # Use OrderedDict to comply with Java's ordering requirement.
+    wrapped_content_key = OrderedDict()
+    wrapped_content_key['KeyId'] = kek.get_kid()
+    wrapped_content_key['EncryptedKey'] = encode_base64(wrapped_cek)
+    wrapped_content_key['Algorithm'] = kek.get_key_wrap_algorithm()
+
+    encryption_agent = OrderedDict()
+    encryption_agent['Protocol'] = version
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_CBC_256
+
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        encryption_agent['EncryptionAlgorithm'] = _EncryptionAlgorithm.AES_GCM_256
+
+        encrypted_region_info = OrderedDict()
+        encrypted_region_info['DataLength'] = _GCM_REGION_DATA_LENGTH
+        encrypted_region_info['NonceLength'] = _GCM_NONCE_LENGTH
+
+    encryption_data_dict = OrderedDict()
+    encryption_data_dict['WrappedContentKey'] = wrapped_content_key
+    encryption_data_dict['EncryptionAgent'] = encryption_agent
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        encryption_data_dict['ContentEncryptionIV'] = encode_base64(iv)
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        encryption_data_dict['EncryptedRegionInfo'] = encrypted_region_info
+    encryption_data_dict['KeyWrappingMetadata'] = {'EncryptionLibrary': 'Python ' + VERSION}
+
+    return encryption_data_dict
+
+
+def _dict_to_encryption_data(encryption_data_dict):
+    '''
+    Converts the specified dictionary to an EncryptionData object for
+    eventual use in decryption.
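+
+    For reference, the expected shape mirrors what _generate_encryption_data_dict
+    above produces; an illustrative V2 example (all values are placeholders):
+
+        {
+            "WrappedContentKey": {"KeyId": "...", "EncryptedKey": "<base64>", "Algorithm": "..."},
+            "EncryptionAgent": {"Protocol": "2.0", "EncryptionAlgorithm": "AES_GCM_256"},
+            "EncryptedRegionInfo": {"DataLength": 4194304, "NonceLength": 12},
+            "KeyWrappingMetadata": {"EncryptionLibrary": "Python x.y.z"}
+        }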
+ + :param dict encryption_data_dict: + The dictionary containing the encryption data. + :return: an _EncryptionData object built from the dictionary. + :rtype: _EncryptionData + ''' + try: + protocol = encryption_data_dict['EncryptionAgent']['Protocol'] + if protocol not in [_ENCRYPTION_PROTOCOL_V1, _ENCRYPTION_PROTOCOL_V2]: + raise ValueError("Unsupported encryption version.") + except KeyError: + raise ValueError("Unsupported encryption version.") + wrapped_content_key = encryption_data_dict['WrappedContentKey'] + wrapped_content_key = _WrappedContentKey(wrapped_content_key['Algorithm'], + decode_base64_to_bytes(wrapped_content_key['EncryptedKey']), + wrapped_content_key['KeyId']) + + encryption_agent = encryption_data_dict['EncryptionAgent'] + encryption_agent = _EncryptionAgent(encryption_agent['EncryptionAlgorithm'], + encryption_agent['Protocol']) + + if 'KeyWrappingMetadata' in encryption_data_dict: + key_wrapping_metadata = encryption_data_dict['KeyWrappingMetadata'] + else: + key_wrapping_metadata = None + + # AES-CBC only + encryption_iv = None + if 'ContentEncryptionIV' in encryption_data_dict: + encryption_iv = decode_base64_to_bytes(encryption_data_dict['ContentEncryptionIV']) + + # AES-GCM only + region_info = None + if 'EncryptedRegionInfo' in encryption_data_dict: + encrypted_region_info = encryption_data_dict['EncryptedRegionInfo'] + region_info = _EncryptedRegionInfo(encrypted_region_info['DataLength'], + encrypted_region_info['NonceLength'], + _GCM_TAG_LENGTH) + + encryption_data = _EncryptionData(encryption_iv, + region_info, + encryption_agent, + wrapped_content_key, + key_wrapping_metadata) + + return encryption_data + + +def _generate_AES_CBC_cipher(cek, iv): + ''' + Generates and returns an encryption cipher for AES CBC using the given cek and iv. + + :param bytes[] cek: The content encryption key for the cipher. + :param bytes[] iv: The initialization vector for the cipher. + :return: A cipher for encrypting in AES256 CBC. + :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher + ''' + + backend = default_backend() + algorithm = AES(cek) + mode = CBC(iv) + return Cipher(algorithm, mode, backend) + + +def _validate_and_unwrap_cek(encryption_data, key_encryption_key=None, key_resolver=None): + ''' + Extracts and returns the content_encryption_key stored in the encryption_data object + and performs necessary validation on all parameters. + :param _EncryptionData encryption_data: + The encryption metadata of the retrieved value. + :param obj key_encryption_key: + The key_encryption_key used to unwrap the cek. Please refer to high-level service object + instance variables for more details. + :param func key_resolver: + A function used that, given a key_id, will return a key_encryption_key. Please refer + to high-level service object instance variables for more details. + :return: the content_encryption_key stored in the encryption_data object. 
+    :rtype: bytes[]
+    '''
+
+    _validate_not_none('encrypted_key', encryption_data.wrapped_content_key.encrypted_key)
+
+    # Validate we have the right info for the specified version
+    if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1:
+        _validate_not_none('content_encryption_IV', encryption_data.content_encryption_IV)
+    elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2:
+        _validate_not_none('encrypted_region_info', encryption_data.encrypted_region_info)
+    else:
+        raise ValueError('Specified encryption version is not supported.')
+
+    content_encryption_key = None
+
+    # If the resolver exists, give priority to the key it finds.
+    if key_resolver is not None:
+        key_encryption_key = key_resolver(encryption_data.wrapped_content_key.key_id)
+
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    if not hasattr(key_encryption_key, 'get_kid') or not callable(key_encryption_key.get_kid):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'get_kid'))
+    if not hasattr(key_encryption_key, 'unwrap_key') or not callable(key_encryption_key.unwrap_key):
+        raise AttributeError(_ERROR_OBJECT_INVALID.format('key encryption key', 'unwrap_key'))
+    if encryption_data.wrapped_content_key.key_id != key_encryption_key.get_kid():
+        raise ValueError('Provided or resolved key-encryption-key does not match the id of key used to encrypt.')
+    # Will throw an exception if the specified algorithm is not supported.
+    content_encryption_key = key_encryption_key.unwrap_key(encryption_data.wrapped_content_key.encrypted_key,
+                                                           encryption_data.wrapped_content_key.algorithm)
+
+    # For V2, the version is included with the cek. We need to validate it
+    # and remove it from the actual cek.
+    if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2:
+        version_2_bytes = _ENCRYPTION_PROTOCOL_V2.encode().ljust(8, b'\0')
+        cek_version_bytes = content_encryption_key[:len(version_2_bytes)]
+        if cek_version_bytes != version_2_bytes:
+            raise ValueError('The encryption metadata is not valid and may have been modified.')
+
+        # Remove version from the start of the cek.
+        content_encryption_key = content_encryption_key[len(version_2_bytes):]
+
+    _validate_not_none('content_encryption_key', content_encryption_key)
+
+    return content_encryption_key
+
+
+def _decrypt_message(message, encryption_data, key_encryption_key=None, resolver=None):
+    '''
+    Decrypts the given ciphertext using AES256 in CBC mode (V1) or GCM mode (V2).
+    Unwraps the content-encryption-key using the user-provided or resolved key-encryption-key (kek).
+    Returns the original plaintext.
+
+    :param str message:
+        The ciphertext to be decrypted.
+    :param _EncryptionData encryption_data:
+        The metadata associated with this ciphertext.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The decrypted plaintext.
+ :rtype: str + ''' + _validate_not_none('message', message) + content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, resolver) + + if encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V1: + if not encryption_data.content_encryption_IV: + raise ValueError("Missing required metadata for decryption.") + + cipher = _generate_AES_CBC_cipher(content_encryption_key, encryption_data.content_encryption_IV) + + # decrypt data + decrypted_data = message + decryptor = cipher.decryptor() + decrypted_data = (decryptor.update(decrypted_data) + decryptor.finalize()) + + # unpad data + unpadder = PKCS7(128).unpadder() + decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize()) + + elif encryption_data.encryption_agent.protocol == _ENCRYPTION_PROTOCOL_V2: + block_info = encryption_data.encrypted_region_info + if not block_info or not block_info.nonce_length: + raise ValueError("Missing required metadata for decryption.") + + nonce_length = encryption_data.encrypted_region_info.nonce_length + + # First bytes are the nonce + nonce = message[:nonce_length] + ciphertext_with_tag = message[nonce_length:] + + aesgcm = AESGCM(content_encryption_key) + decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None) + + else: + raise ValueError('Specified encryption version is not supported.') + + return decrypted_data + + +def encrypt_blob(blob, key_encryption_key, version): + ''' + Encrypts the given blob using the given encryption protocol version. + Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek). + Returns a json-formatted string containing the encryption metadata. This method should + only be used when a blob is small enough for single shot upload. Encrypting larger blobs + is done as a part of the upload_data_chunks method. + + :param bytes blob: + The blob to be encrypted. + :param object key_encryption_key: + The user-provided key-encryption-key. Must implement the following methods: + wrap_key(key)--wraps the specified key using an algorithm of the user's choice. + get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key. + get_kid()--returns a string key id for this key-encryption-key. + :param str version: The client encryption version to use. + :return: A tuple of json-formatted string containing the encryption metadata and the encrypted blob data. + :rtype: (str, bytes) + ''' + + _validate_not_none('blob', blob) + _validate_not_none('key_encryption_key', key_encryption_key) + _validate_key_encryption_key_wrap(key_encryption_key) + + if version == _ENCRYPTION_PROTOCOL_V1: + # AES256 uses 256 bit (32 byte) keys and always with 16 byte blocks + content_encryption_key = os.urandom(32) + initialization_vector = os.urandom(16) + + cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector) + + # PKCS7 with 16 byte blocks ensures compatibility with AES. + padder = PKCS7(128).padder() + padded_data = padder.update(blob) + padder.finalize() + + # Encrypt the data. + encryptor = cipher.encryptor() + encrypted_data = encryptor.update(padded_data) + encryptor.finalize() + + elif version == _ENCRYPTION_PROTOCOL_V2: + # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce. 
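+        # Illustrative note (added for clarity, not in the original source): the stream
+        # below frames the ciphertext as repeated regions of
+        #     nonce || ciphertext of up to _GCM_REGION_DATA_LENGTH bytes || tag
+        # so a blob of N bytes grows by ceil(N / _GCM_REGION_DATA_LENGTH) * (nonce + tag)
+        # bytes, which is exactly the V2 adjustment get_adjusted_upload_size computes.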
+        content_encryption_key = os.urandom(32)
+        initialization_vector = None
+
+        data = BytesIO(blob)
+        encryption_stream = GCMBlobEncryptionStream(content_encryption_key, data)
+
+        encrypted_data = encryption_stream.read()
+
+    else:
+        raise ValueError("Invalid encryption version specified.")
+
+    encryption_data = _generate_encryption_data_dict(key_encryption_key, content_encryption_key,
+                                                     initialization_vector, version)
+    encryption_data['EncryptionMode'] = 'FullBlob'
+
+    return dumps(encryption_data), encrypted_data
+
+
+def generate_blob_encryption_data(key_encryption_key, version):
+    '''
+    Generates the encryption_metadata for the blob.
+
+    :param object key_encryption_key:
+        The key-encryption-key used to wrap the cek associated with this blob.
+    :param str version: The client encryption version to use.
+    :return: A tuple containing the cek and iv for this blob as well as the
+        serialized encryption metadata for the blob.
+    :rtype: (bytes, Optional[bytes], str)
+    '''
+    encryption_data = None
+    content_encryption_key = None
+    initialization_vector = None
+    if key_encryption_key:
+        _validate_key_encryption_key_wrap(key_encryption_key)
+        content_encryption_key = os.urandom(32)
+        # Initialization vector is only needed for V1
+        if version == _ENCRYPTION_PROTOCOL_V1:
+            initialization_vector = os.urandom(16)
+        encryption_data = _generate_encryption_data_dict(key_encryption_key,
+                                                         content_encryption_key,
+                                                         initialization_vector,
+                                                         version)
+        encryption_data['EncryptionMode'] = 'FullBlob'
+        encryption_data = dumps(encryption_data)
+
+    return content_encryption_key, initialization_vector, encryption_data
+
+
+def decrypt_blob(  # pylint: disable=too-many-locals,too-many-statements
+        require_encryption,
+        key_encryption_key,
+        key_resolver,
+        content,
+        start_offset,
+        end_offset,
+        response_headers):
+    """
+    Decrypts the given blob contents and returns only the requested range.
+
+    :param bool require_encryption:
+        Whether the calling blob service requires objects to be decrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param object key_resolver:
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :param bytes content:
+        The encrypted blob content.
+    :param int start_offset:
+        The adjusted offset from the beginning of the *decrypted* content for the caller's data.
+    :param int end_offset:
+        The adjusted offset from the end (V1) or the end index (V2) into the *decrypted*
+        content for the caller's data.
+    :param Dict[str, Any] response_headers:
+        A dictionary of response headers from the download request. Expected to include the
+        'x-ms-meta-encryptiondata' header if the blob was encrypted.
+    :return: The decrypted blob content.
+    :rtype: bytes
+    """
+    try:
+        encryption_data = _dict_to_encryption_data(loads(response_headers['x-ms-meta-encryptiondata']))
+    except:  # pylint: disable=bare-except
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received data does not contain appropriate metadata. ' + \
+                'Data was either not encrypted or metadata has been lost.')
+
+        return content
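+
+    # Illustrative note: for a ranged V1 download the original IV is only right for
+    # the first block, so get_adjusted_download_range_and_offset widens the range by
+    # one extra 16 byte block and the code below uses that extra block (the previous
+    # ciphertext block) as the CBC IV when start_offset >= 16.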
+    algorithm = encryption_data.encryption_agent.encryption_algorithm
+    if algorithm not in (_EncryptionAlgorithm.AES_CBC_256, _EncryptionAlgorithm.AES_GCM_256):
+        raise ValueError('Specified encryption algorithm is not supported.')
+
+    version = encryption_data.encryption_agent.protocol
+    if version not in (_ENCRYPTION_PROTOCOL_V1, _ENCRYPTION_PROTOCOL_V2):
+        raise ValueError('Specified encryption version is not supported.')
+
+    content_encryption_key = _validate_and_unwrap_cek(encryption_data, key_encryption_key, key_resolver)
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        blob_type = response_headers['x-ms-blob-type']
+
+        iv = None
+        unpad = False
+        if 'content-range' in response_headers:
+            content_range = response_headers['content-range']
+            # Format: 'bytes x-y/size'
+
+            # Ignore the word 'bytes'
+            content_range = content_range.split(' ')
+
+            content_range = content_range[1].split('-')
+            content_range = content_range[1].split('/')
+            end_range = int(content_range[0])
+            blob_size = int(content_range[1])
+
+            if start_offset >= 16:
+                iv = content[:16]
+                content = content[16:]
+                start_offset -= 16
+            else:
+                iv = encryption_data.content_encryption_IV
+
+            if end_range == blob_size - 1:
+                unpad = True
+        else:
+            unpad = True
+            iv = encryption_data.content_encryption_IV
+
+        if blob_type == 'PageBlob':
+            unpad = False
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, iv)
+        decryptor = cipher.decryptor()
+
+        content = decryptor.update(content) + decryptor.finalize()
+        if unpad:
+            unpadder = PKCS7(128).unpadder()
+            content = unpadder.update(content) + unpadder.finalize()
+
+        return content[start_offset: len(content) - end_offset]
+
+    if version == _ENCRYPTION_PROTOCOL_V2:
+        # We assume the content contains only full encryption regions
+        total_size = len(content)
+        offset = 0
+
+        nonce_length = encryption_data.encrypted_region_info.nonce_length
+        data_length = encryption_data.encrypted_region_info.data_length
+        tag_length = encryption_data.encrypted_region_info.tag_length
+        region_length = nonce_length + data_length + tag_length
+
+        decrypted_content = bytearray()
+        while offset < total_size:
+            # Process one encryption region at a time
+            process_size = min(region_length, total_size)
+            encrypted_region = content[offset:offset + process_size]
+
+            # First bytes are the nonce
+            nonce = encrypted_region[:nonce_length]
+            ciphertext_with_tag = encrypted_region[nonce_length:]
+
+            aesgcm = AESGCM(content_encryption_key)
+            decrypted_data = aesgcm.decrypt(nonce, ciphertext_with_tag, None)
+            decrypted_content.extend(decrypted_data)
+
+            offset += process_size
+
+        # Read the caller-requested data from the decrypted content
+        return decrypted_content[start_offset:end_offset]
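+
+
+# Illustrative sketch (comment only, added for clarity): a V2 single-shot round
+# trip through the helpers in this module would look like
+#
+#     metadata_json, ciphertext = encrypt_blob(b'data', kek, _ENCRYPTION_PROTOCOL_V2)
+#     encryption_data = _dict_to_encryption_data(loads(metadata_json))
+#     plaintext = _decrypt_message(ciphertext, encryption_data, kek)
+#
+# where `kek` is any user-supplied object implementing wrap_key / unwrap_key /
+# get_key_wrap_algorithm / get_kid as described in the docstrings above.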
+def get_blob_encryptor_and_padder(cek, iv, should_pad):
+    encryptor = None
+    padder = None
+
+    if cek is not None and iv is not None:
+        cipher = _generate_AES_CBC_cipher(cek, iv)
+        encryptor = cipher.encryptor()
+        padder = PKCS7(128).padder() if should_pad else None
+
+    return encryptor, padder
+
+
+def encrypt_queue_message(message, key_encryption_key, version):
+    '''
+    Encrypts the given plain text message using the given protocol version.
+    Wraps the generated content-encryption-key using the user-provided key-encryption-key (kek).
+    Returns a json-formatted string containing the encrypted message and the encryption metadata.
+
+    :param object message:
+        The plain text message to be encrypted.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        wrap_key(key)--wraps the specified key using an algorithm of the user's choice.
+        get_key_wrap_algorithm()--returns the algorithm used to wrap the specified symmetric key.
+        get_kid()--returns a string key id for this key-encryption-key.
+    :param str version: The client encryption version to use.
+    :return: A json-formatted string containing the encrypted message and the encryption metadata.
+    :rtype: str
+    '''
+
+    _validate_not_none('message', message)
+    _validate_not_none('key_encryption_key', key_encryption_key)
+    _validate_key_encryption_key_wrap(key_encryption_key)
+
+    # Queue encoding functions all return unicode strings, and encryption should
+    # operate on binary strings.
+    message = message.encode('utf-8')
+
+    if version == _ENCRYPTION_PROTOCOL_V1:
+        # AES256 CBC uses 256 bit (32 byte) keys and always with 16 byte blocks
+        content_encryption_key = os.urandom(32)
+        initialization_vector = os.urandom(16)
+
+        cipher = _generate_AES_CBC_cipher(content_encryption_key, initialization_vector)
+
+        # PKCS7 with 16 byte blocks ensures compatibility with AES.
+        padder = PKCS7(128).padder()
+        padded_data = padder.update(message) + padder.finalize()
+
+        # Encrypt the data.
+        encryptor = cipher.encryptor()
+        encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
+
+    elif version == _ENCRYPTION_PROTOCOL_V2:
+        # AES256 GCM uses 256 bit (32 byte) keys and a 12 byte nonce.
+        content_encryption_key = os.urandom(32)
+        initialization_vector = None
+
+        # The nonce MUST be unique for every message encrypted with a given key
+        nonce = os.urandom(12)
+        aesgcm = AESGCM(content_encryption_key)
+
+        # Returns ciphertext + tag
+        ciphertext_with_tag = aesgcm.encrypt(nonce, message, None)
+        encrypted_data = nonce + ciphertext_with_tag
+
+    else:
+        raise ValueError("Invalid encryption version specified.")
+
+    # Build the dictionary structure.
+    queue_message = {'EncryptedMessageContents': encode_base64(encrypted_data),
+                     'EncryptionData': _generate_encryption_data_dict(key_encryption_key,
+                                                                      content_encryption_key,
+                                                                      initialization_vector,
+                                                                      version)}
+
+    return dumps(queue_message)
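+
+
+# Illustrative note: the queue payload built above is a JSON document of the form
+#
+#     {"EncryptedMessageContents": "<base64 ciphertext>",
+#      "EncryptionData": {"WrappedContentKey": {...}, "EncryptionAgent": {...}, ...}}
+#
+# which decrypt_queue_message below takes back apart.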
+def decrypt_queue_message(message, response, require_encryption, key_encryption_key, resolver):
+    '''
+    Returns the decrypted message contents from an EncryptedQueueMessage.
+    If no encryption metadata is present, will return the unaltered message.
+
+    :param str message:
+        The JSON formatted QueueEncryptedMessage contents with all associated metadata.
+    :param response:
+        The pipeline response; its http_response is used for error reporting if decryption fails.
+    :param bool require_encryption:
+        If set, will enforce that the retrieved messages are encrypted and decrypt them.
+    :param object key_encryption_key:
+        The user-provided key-encryption-key. Must implement the following methods:
+        unwrap_key(key, algorithm)
+            - returns the unwrapped form of the specified symmetric key using the string-specified algorithm.
+        get_kid()
+            - returns a string key id for this key-encryption-key.
+    :param function resolver(kid):
+        The user-provided key resolver. Uses the kid string to return a key-encryption-key
+        implementing the interface defined above.
+    :return: The plain text message from the queue message.
+    :rtype: str
+    '''
+    response = response.http_response
+
+    try:
+        message = loads(message)
+
+        encryption_data = _dict_to_encryption_data(message['EncryptionData'])
+        decoded_data = decode_base64_to_bytes(message['EncryptedMessageContents'])
+    except (KeyError, ValueError):
+        # Message was not json formatted and so was not encrypted
+        # or the user provided a json formatted message
+        # or the metadata was malformed.
+        if require_encryption:
+            raise ValueError(
+                'Encryption required, but received message does not contain appropriate metadata. ' + \
+                'Message was either not encrypted or metadata was incorrect.')
+
+        return message
+    try:
+        return _decrypt_message(decoded_data, encryption_data, key_encryption_key, resolver).decode('utf-8')
+    except Exception as error:
+        raise HttpResponseError(
+            message="Decryption failed.",
+            response=response,
+            error=error)
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/__init__.py
new file mode 100644
index 00000000000..2064cc37492
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/__init__.py
@@ -0,0 +1,21 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from ._azure_blob_storage import AzureBlobStorage
+
+try:
+    from ._patch import __all__ as _patch_all
+    from ._patch import *  # type: ignore # pylint: disable=unused-wildcard-import
+except ImportError:
+    _patch_all = []
+from ._patch import patch_sdk as _patch_sdk
+
+__all__ = ["AzureBlobStorage"]
+__all__.extend([p for p in _patch_all if p not in __all__])
+
+_patch_sdk()
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_azure_blob_storage.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_azure_blob_storage.py
new file mode 100644
index 00000000000..1b841a23361
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_azure_blob_storage.py
@@ -0,0 +1,103 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is regenerated.
+# --------------------------------------------------------------------------
+
+from copy import deepcopy
+from typing import Any
+
+from azure.core import PipelineClient
+from azure.core.rest import HttpRequest, HttpResponse
+
+from . import models
+from ._configuration import AzureBlobStorageConfiguration
+from ._serialization import Deserializer, Serializer
+from .operations import (
+    AppendBlobOperations,
+    BlobOperations,
+    BlockBlobOperations,
+    ContainerOperations,
+    PageBlobOperations,
+    ServiceOperations,
+)
+
+
+class AzureBlobStorage:  # pylint: disable=client-accepts-api-version-keyword
+    """AzureBlobStorage.
+ + :ivar service: ServiceOperations operations + :vartype service: azure.storage.blob.operations.ServiceOperations + :ivar container: ContainerOperations operations + :vartype container: azure.storage.blob.operations.ContainerOperations + :ivar blob: BlobOperations operations + :vartype blob: azure.storage.blob.operations.BlobOperations + :ivar page_blob: PageBlobOperations operations + :vartype page_blob: azure.storage.blob.operations.PageBlobOperations + :ivar append_blob: AppendBlobOperations operations + :vartype append_blob: azure.storage.blob.operations.AppendBlobOperations + :ivar block_blob: BlockBlobOperations operations + :vartype block_blob: azure.storage.blob.operations.BlockBlobOperations + :param url: The URL of the service account, container, or blob that is the target of the + desired operation. Required. + :type url: str + :param base_url: Service URL. Required. Default value is "". + :type base_url: str + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2021-12-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, url: str, base_url: str = "", **kwargs: Any + ) -> None: + self._config = AzureBlobStorageConfiguration(url=url, **kwargs) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) + self.container = ContainerOperations(self._client, self._config, self._serialize, self._deserialize) + self.blob = BlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.page_blob = PageBlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.append_blob = AppendBlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.block_blob = BlockBlobOperations(self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
+ :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + def close(self): + # type: () -> None + self._client.close() + + def __enter__(self): + # type: () -> AzureBlobStorage + self._client.__enter__() + return self + + def __exit__(self, *exc_details): + # type: (Any) -> None + self._client.__exit__(*exc_details) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_configuration.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_configuration.py new file mode 100644 index 00000000000..bde75f02cd5 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_configuration.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + + +class AzureBlobStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for AzureBlobStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, container, or blob that is the target of the + desired operation. Required. + :type url: str + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2021-12-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + """ + + def __init__(self, url: str, **kwargs: Any) -> None: + super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + version = kwargs.pop("version", "2021-12-02") # type: str + + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + self.url = url + self.version = version + kwargs.setdefault("sdk_moniker", "azureblobstorage/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure( + self, **kwargs # type: Any + ): + # type: (...) 
-> None + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_patch.py new file mode 100644 index 00000000000..f99e77fef98 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_patch.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# This file is used for handwritten extensions to the generated code. Example: +# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md +def patch_sdk(): + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_serialization.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_serialization.py new file mode 100644 index 00000000000..7c1dedb5133 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_serialization.py @@ -0,0 +1,1970 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# pylint: skip-file + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote # type: ignore +import xml.etree.ElementTree as ET + +import isodate + +from typing import Dict, Any, cast, TYPE_CHECKING + +from azure.core.exceptions import DeserializationError, SerializationError, raise_with_traceback + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +if TYPE_CHECKING: + from typing import Optional, Union, AnyStr, IO, Mapping + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data, content_type=None): + # type: (Optional[Union[AnyStr, IO]], Optional[str]) -> Any + """Decode data according to content-type. + + Accept a stream of data as well, but will be load at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. 
+            data_as_str = cast(str, data)
+
+        # Remove Byte Order Mark if present in string
+        data_as_str = data_as_str.lstrip(_BOM)
+
+        if content_type is None:
+            return data
+
+        if cls.JSON_REGEXP.match(content_type):
+            try:
+                return json.loads(data_as_str)
+            except ValueError as err:
+                raise DeserializationError("JSON is invalid: {}".format(err), err)
+        elif "xml" in (content_type or []):
+            try:
+
+                try:
+                    if isinstance(data, unicode):  # type: ignore
+                        # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string
+                        data_as_str = data_as_str.encode(encoding="utf-8")  # type: ignore
+                except NameError:
+                    pass
+
+                return ET.fromstring(data_as_str)  # nosec
+            except ET.ParseError:
+                # It might be because the server has an issue, and returned JSON with
+                # content-type XML....
+                # So let's try a JSON load, and if it's still broken
+                # let's flow the initial exception
+                def _json_attempt(data):
+                    try:
+                        return True, json.loads(data)
+                    except ValueError:
+                        return False, None  # Don't care about this one
+
+                success, json_result = _json_attempt(data)
+                if success:
+                    return json_result
+                # If I'm here, it's not JSON, it's not XML, let's scream
+                # and raise the last context in this block (the XML exception)
+                # The function hack is because Py2.7 messes up with exception
+                # context otherwise.
+                _LOGGER.critical("Wasn't XML nor JSON, failing")
+                raise_with_traceback(DeserializationError, "XML is invalid")
+        raise DeserializationError("Cannot deserialize content-type: {}".format(content_type))
+
+    @classmethod
+    def deserialize_from_http_generics(cls, body_bytes, headers):
+        # type: (Optional[Union[AnyStr, IO]], Mapping) -> Any
+        """Deserialize from HTTP response.
+
+        Use bytes and headers to NOT use any requests/aiohttp or whatever
+        specific implementation.
+        Headers will be tested for "content-type".
+        """
+        # Try to use content-type from headers if available
+        content_type = None
+        if "content-type" in headers:
+            content_type = headers["content-type"].split(";")[0].strip().lower()
+        # Ouch, this server did not declare what it sent...
+        # Let's guess it's JSON...
+        # Also, since Autorest was considering that an empty body was a valid JSON,
+        # need that test as well....
+        else:
+            content_type = "application/json"
+
+        if body_bytes:
+            return cls.deserialize_from_text(body_bytes, content_type)
+        return None
+
+
+try:
+    basestring  # type: ignore
+    unicode_str = unicode  # type: ignore
+except NameError:
+    basestring = str  # type: ignore
+    unicode_str = str  # type: ignore
+
+_LOGGER = logging.getLogger(__name__)
+
+try:
+    _long_type = long  # type: ignore
+except NameError:
+    _long_type = int
+
+
+class UTC(datetime.tzinfo):
+    """Time Zone info for handling UTC"""
+
+    def utcoffset(self, dt):
+        """UTC offset for UTC is 0."""
+        return datetime.timedelta(0)
+
+    def tzname(self, dt):
+        """Timestamp representation."""
+        return "Z"
+
+    def dst(self, dt):
+        """No daylight saving for UTC."""
+        return datetime.timedelta(hours=1)
+
+
+try:
+    from datetime import timezone as _FixedOffset
+except ImportError:  # Python 2.7
+
+    class _FixedOffset(datetime.tzinfo):  # type: ignore
+        """Fixed offset in minutes east from UTC.
+        Copy/pasted from Python doc
+        :param datetime.timedelta offset: offset in timedelta format
+        """
+
+        def __init__(self, offset):
+            self.__offset = offset
+
+        def utcoffset(self, dt):
+            return self.__offset
+
+        def tzname(self, dt):
+            return str(self.__offset.total_seconds() / 3600)
+
+        def __repr__(self):
+            return "<FixedOffset {}>".format(self.tzname(None))
+
+        def dst(self, dt):
+            return datetime.timedelta(0)
+
+        def __getinitargs__(self):
+            return (self.__offset,)
+
+
+try:
+    from datetime import timezone
+
+    TZ_UTC = timezone.utc  # type: ignore
+except ImportError:
+    TZ_UTC = UTC()  # type: ignore
+
+_FLATTEN = re.compile(r"(?<!\\)\.")
+
+
+class Serializer(object):
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan", 2: "Feb", 3: "Mar", 4: "Apr", 5: "May", 6: "Jun",
+        7: "Jul", 8: "Aug", 9: "Sep", 10: "Oct", 11: "Nov", 12: "Dec",
+    }
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes=None):
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(self, target_obj, data_type=None, **kwargs):
+        """Serialize data into a string according to type.
+
+        :param target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises: SerializationError if serialization fails.
+        """
+        key_transformer = kwargs.get("key_transformer", self.key_transformer)
+        keep_readonly = kwargs.get("keep_readonly", False)
+        if target_obj is None:
+            return None
+
+        attr_name = None
+        class_name = target_obj.__class__.__name__
+
+        if data_type:
+            return self.serialize_data(target_obj, data_type, **kwargs)
+
+        if not hasattr(target_obj, "_attribute_map"):
+            data_type = type(target_obj).__name__
+            if data_type in self.basic_types.values():
+                return self.serialize_data(target_obj, data_type, **kwargs)
+
+        # Force "is_xml" kwargs if we detect a XML model
+        try:
+            is_xml_model_serialization = kwargs["is_xml"]
+        except KeyError:
+            is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model())
+
+        serialized = {}
+        if is_xml_model_serialization:
+            serialized = target_obj._create_xml_node()
+        try:
+            attributes = target_obj._attribute_map
+            for attr, attr_desc in attributes.items():
+                attr_name = attr
+                if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False):
+                    continue
+
+                if attr_name == "additional_properties" and attr_desc["key"] == "":
+                    if target_obj.additional_properties is not None:
+                        serialized.update(target_obj.additional_properties)
+                    continue
+                try:
+
+                    orig_attr = getattr(target_obj, attr)
+                    if is_xml_model_serialization:
+                        pass  # Don't provide "transformer" for XML for now.
Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) + continue + if xml_desc.get("text", False): + serialized.text = new_attr + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = unicode_str(new_attr) + serialized.append(local_node) + else: # JSON + for k in reversed(keys): + unflattened = {k: new_attr} + new_attr = unflattened + + _new_attr = new_attr + _serialized = serialized + for k in keys: + if k not in _serialized: + _serialized.update(_new_attr) + _new_attr = _new_attr[k] + _serialized = _serialized[k] + except ValueError: + continue + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise_with_traceback(SerializationError, msg, err) + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. 
+ deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise_with_traceback(SerializationError, "Unable to build a model: " + str(err), err) + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + data = [self.serialize_data(d, internal_data_type, **kwargs) if d is not None else "" for d in data] + if not kwargs.get("skip_quote", False): + data = [quote(str(d), safe="") for d in data] + return str(self.serialize_iter(data, internal_data_type, **kwargs)) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. 
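+
+        Illustrative examples (added for clarity):
+
+            serialize_data("foo", "str")         ->  "foo"
+            serialize_data(["a", "b"], "[str]")  ->  ["a", "b"]
+            serialize_data(None, "str")          ->  raises ValueError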
+ """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise_with_traceback(SerializationError, msg.format(data, data_type), err) + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. 
+ + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. + serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list data: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError: + serialized.append(None) + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # Append all serialized list elements to the result node (or plain list) + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized
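Two behaviors of serialize_iter above are worth illustrating: an element that fails to serialize is kept as a None placeholder, and a div separator collapses the list into one joined string (this is how comma-separated query values are produced by the query method earlier). A hedged sketch:

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Serializer

    serializer = Serializer()
    serializer.serialize_iter(["a", "b"], "str")                 # -> ['a', 'b']
    serializer.serialize_iter(["a", None, "b"], "str", div=",")  # -> 'a,,b' (None becomes an empty slot)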
+ + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. + :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is unicode_str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + """Serialize an Enum member (or raw value) to its string value. + + :param attr: Enum member or raw value to be serialized. + :param enum_obj: Enum class used to validate the value. + :rtype: str + """ + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) + return result + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not a valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. + + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:06}".format(attr.microsecond) + return t
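For orientation, the static helpers above emit the wire formats used throughout the generated operations; note that serialize_base64 is the URL-safe, unpadded variant. A small sketch (outputs shown are what the code above should produce):

    import datetime
    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Serializer

    Serializer.serialize_date(datetime.date(2023, 5, 1))  # -> '2023-05-01'
    Serializer.serialize_time(datetime.time(12, 30, 5))   # -> '12:30:05'
    Serializer.serialize_base64(b"\xfb\xff")              # -> '-_8' (base64url, '=' padding stripped)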
+ + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(SerializationError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If we see None at any point while following the flattened JSON path, + # it means that all properties below it are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key)
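rest_key_extractor above walks a flattened attribute-map key such as 'properties.provisioningState' through nested JSON one dotted segment at a time, bailing out with None as soon as a level is missing. A hedged sketch (import path assumed as before):

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import rest_key_extractor

    attr_desc = {"key": "properties.provisioningState", "type": "str"}
    rest_key_extractor(None, attr_desc, {"properties": {"provisioningState": "Succeeded"}})  # -> 'Succeeded'
    rest_key_extractor(None, attr_desc, {"properties": None})  # -> None (the issue 197 guard above)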
+ + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If we see None at any point while following the flattened JSON path, + # it means that all properties below it are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. + + :param dict internal_type: A model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{}{}".format(xml_ns, xml_name) + return xml_name
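The xml_key_extractor defined just below resolves the same attribute descriptions against an ElementTree node instead of a dict, covering XML attributes, x-ms-text nodes, and wrapped or unwrapped arrays. A hedged sketch of the wrapped-array branch:

    import xml.etree.ElementTree as ET
    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import xml_key_extractor

    parent = ET.fromstring("<Blob><Tags><Tag>a</Tag><Tag>b</Tag></Tags></Blob>")
    attr_desc = {"key": "Tags", "type": "[str]", "xml": {"name": "Tags", "itemsName": "Tag", "wrapped": True}}
    children = xml_key_extractor("tags", attr_desc, parent)
    [el.text for el in children]  # -> ['a', 'b'] (the wrap node is unwrapped to its children)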
+ + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if internal_type: # Complex type, ignore itemsName and use the complex type name + items_name = _extract_name_from_internal_type(internal_type) + else: + items_name = xml_desc.get("itemsName", xml_name) + children = data.findall(items_name) + + if len(children) == 0: + if is_iter_type: + if is_wrapped: + return None # is_wrapped no node, we want None + else: + return [] # not wrapped, assume empty list + return None # Assume it's not there, maybe an optional node. + + # If is_iter_type and not wrapped, return all found children + if is_iter_type: + if not is_wrapped: + return children + else: # Iter and wrapped, should have found one node only (the wrap one) + if len(children) != 1: + raise DeserializationError( + "Tried to deserialize an array that is not wrapped, but found several nodes '{}'. Maybe you should declare this array as wrapped?".format( + xml_name + ) + ) + return list(children[0]) # Might be empty list and that's ok. + + # Here it's not an iter type, we should have found one element only or empty + if len(children) > 1: + raise DeserializationError("Found several XML '{}' elements where only one was expected".format(xml_name)) + return children[0] + + +class Deserializer(object): + """Response object model deserializer. + + :param dict classes: Class type dictionary for deserializing complex types. + :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. + """ + + basic_types = {str: "str", int: "int", bool: "bool", float: "float"} + + valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") + + def __init__(self, classes=None): + self.deserialize_type = { + "iso-8601": Deserializer.deserialize_iso, + "rfc-1123": Deserializer.deserialize_rfc, + "unix-time": Deserializer.deserialize_unix, + "duration": Deserializer.deserialize_duration, + "date": Deserializer.deserialize_date, + "time": Deserializer.deserialize_time, + "decimal": Deserializer.deserialize_decimal, + "long": Deserializer.deserialize_long, + "bytearray": Deserializer.deserialize_bytearray, + "base64": Deserializer.deserialize_base64, + "object": self.deserialize_object, + "[]": self.deserialize_iter, + "{}": self.deserialize_dict, + } + self.deserialize_expected_types = { + "duration": (isodate.Duration, datetime.timedelta), + "iso-8601": (datetime.datetime), + } + self.dependencies = dict(classes) if classes else {} + self.key_extractors = [rest_key_extractor, xml_key_extractor] + # Additional properties detection only works if the "rest_key_extractor" is used to + # extract the keys. Making it work with any key extractor is too + # complicated, with no real scenario for now. + # So we add a flag to disable additional properties detection. This flag should be + # used if you expect the deserialization to NOT come from a JSON REST syntax. + # Otherwise, results are unexpected + self.additional_properties_detection = True + + def __call__(self, target_obj, response_data, content_type=None): + """Call the deserializer to process a REST response. + + :param str target_obj: Target data type to deserialize to. + :param requests.Response response_data: REST response object. + :param str content_type: Swagger "produces" if available. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model.
+ + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, basestring): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None: + return data + try: + attributes = response._attribute_map + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name + raise_with_traceback(DeserializationError, msg, err) + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. 
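_build_additional_properties above is how unknown service fields survive deserialization: any top-level key the payload carries beyond the model's attribute map is returned as an extras dict. A hedged sketch against this private helper:

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Deserializer

    deserializer = Deserializer()
    attribute_map = {"name": {"key": "name", "type": "str"}}
    deserializer._build_additional_properties(attribute_map, {"name": "blob1", "x-new-field": 1})
    # -> {'x-new-field': 1}; returns None when additional_properties_detection is disabled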
+ + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, basestring): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + If we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param raw_data: Data to be processed. + :param content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF-8 + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this is enough to recognize requests.Response without importing it. + if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param response: The response model class. + :param attrs: The deserialized response attributes.
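failsafe_deserialize above is what the generated operations call for error bodies (see the failsafe_deserialize(_models.StorageError, ...) calls later in this diff): the bare except means any parsing failure yields None, so the HttpResponseError still reaches the caller. A hedged sketch:

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Deserializer

    deserializer = Deserializer()
    deserializer.failsafe_deserialize("int", "not-a-number")  # -> None, DeserializationError swallowed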
+ """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise_with_traceback(DeserializationError, msg, err) + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. + :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. 
+ :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do not recurse on XML, just return the tree as-is + return attr + if isinstance(attr, basestring): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + else: + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + elif isinstance(attr, basestring): + if attr.lower() in ["true", "1"]: + return True + elif attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. + :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this a real string + try: + if isinstance(data, unicode): + return data + except NameError: + return str(data) + else: + return str(data)
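deserialize_basic above is deliberately forgiving: '1', '0', 'true' and 'false' in any case all count as booleans, and an empty XML node becomes '' for strings but None for strongly typed fields. Sketch:

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Deserializer

    deserializer = Deserializer()
    deserializer.deserialize_basic("true", "bool")  # -> True
    deserializer.deserialize_basic("0", "bool")     # -> False
    deserializer.deserialize_basic("", "str")       # -> ''
    deserializer.deserialize_basic("", "int")       # -> None (empty strong type)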
+ + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider removing it in the future. + # https://github.com/Azure/azure-rest-api-specs/issues/141 + try: + return list(enum_obj.__members__.values())[data] + except IndexError: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) + + @staticmethod + def deserialize_base64(attr): + """Deserialize a base64url-encoded string into bytes. + + :param str attr: response string to be deserialized. + :rtype: bytes + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) + attr = attr + padding + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(attr) + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise_with_traceback(DeserializationError, msg, err) + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise_with_traceback(DeserializationError, msg, err) + else: + return duration
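deserialize_base64 above reverses the URL-safe encoding: padding is recomputed from the length, '-' and '_' are mapped back to '+' and '/', then standard base64 decoding applies. Sketch:

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._serialization import Deserializer

    Deserializer.deserialize_base64("-_8")  # -> b'\xfb\xff' (round-trips Serializer.serialize_base64)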
+ + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensures this raises an exception. + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): + raise DeserializationError("Time must have only digits and :. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Deserialize Unix time (seconds since the epoch) into Datetime object. + + :param int attr: Object to be deserialized. + :rtype: Datetime + :raises: DeserializationError if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) + try: + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_vendor.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_vendor.py new file mode 100644 index 00000000000..9aad73fc743 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/_vendor.py @@ -0,0 +1,27 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from azure.core.pipeline.transport import HttpRequest + + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request + + +def _format_url_section(template, **kwargs): + components = template.split("/") + while components: + try: + return template.format(**kwargs) + except KeyError as key: + formatted_components = template.split("/") + components = [c for c in formatted_components if "{}".format(key.args[0]) not in c] + template = "/".join(components) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/__init__.py new file mode 100644 index 00000000000..2064cc37492 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/__init__.py @@ -0,0 +1,21 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._azure_blob_storage import AzureBlobStorage + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = ["AzureBlobStorage"] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_azure_blob_storage.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_azure_blob_storage.py new file mode 100644 index 00000000000..8fbb8e5a4a1 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_azure_blob_storage.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable + +from azure.core import AsyncPipelineClient +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .. 
import models +from .._serialization import Deserializer, Serializer +from ._configuration import AzureBlobStorageConfiguration +from .operations import ( + AppendBlobOperations, + BlobOperations, + BlockBlobOperations, + ContainerOperations, + PageBlobOperations, + ServiceOperations, +) + + +class AzureBlobStorage: # pylint: disable=client-accepts-api-version-keyword + """AzureBlobStorage. + + :ivar service: ServiceOperations operations + :vartype service: azure.storage.blob.aio.operations.ServiceOperations + :ivar container: ContainerOperations operations + :vartype container: azure.storage.blob.aio.operations.ContainerOperations + :ivar blob: BlobOperations operations + :vartype blob: azure.storage.blob.aio.operations.BlobOperations + :ivar page_blob: PageBlobOperations operations + :vartype page_blob: azure.storage.blob.aio.operations.PageBlobOperations + :ivar append_blob: AppendBlobOperations operations + :vartype append_blob: azure.storage.blob.aio.operations.AppendBlobOperations + :ivar block_blob: BlockBlobOperations operations + :vartype block_blob: azure.storage.blob.aio.operations.BlockBlobOperations + :param url: The URL of the service account, container, or blob that is the target of the + desired operation. Required. + :type url: str + :param base_url: Service URL. Required. Default value is "". + :type base_url: str + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2021-12-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, url: str, base_url: str = "", **kwargs: Any + ) -> None: + self._config = AzureBlobStorageConfiguration(url=url, **kwargs) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) + self.container = ContainerOperations(self._client, self._config, self._serialize, self._deserialize) + self.blob = BlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.page_blob = PageBlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.append_blob = AppendBlobOperations(self._client, self._config, self._serialize, self._deserialize) + self.block_blob = BlockBlobOperations(self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. 
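As a hedged usage sketch (not part of the generated code; the extension itself goes through the public BlobClient wrapper instead): the async client is typically used as a context manager, with each operation group reached as an attribute. The service group is assumed here to expose get_properties as in the public SDK.

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02._generated.aio import AzureBlobStorage

    async def fetch_service_properties(account_url: str):
        # url may target an account, container, or blob depending on the operations used.
        async with AzureBlobStorage(url=account_url) as client:
            return await client.service.get_properties()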
+ :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "AzureBlobStorage": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details) -> None: + await self._client.__aexit__(*exc_details) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_configuration.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_configuration.py new file mode 100644 index 00000000000..3ba2c071221 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_configuration.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import Any + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +VERSION = "unknown" + + +class AzureBlobStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for AzureBlobStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, container, or blob that is the target of the + desired operation. Required. + :type url: str + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2021-12-02". Note that overriding this default value may result in unsupported behavior. 
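Every pipeline stage in the configuration below is an azure-core policy, and _configure lets callers override any of them through kwargs; anything not supplied falls back to the default built there. A hedged sketch of swapping in a more aggressive retry policy, reusing the client and account_url from the previous sketch:

    from azure.core.pipeline import policies

    client = AzureBlobStorage(
        url=account_url,
        retry_policy=policies.AsyncRetryPolicy(retry_total=10),
        logging_enable=True,  # picked up by the default NetworkTraceLoggingPolicy
    )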
+ :paramtype version: str + """ + + def __init__(self, url: str, **kwargs: Any) -> None: + super(AzureBlobStorageConfiguration, self).__init__(**kwargs) + version = kwargs.pop("version", "2021-12-02") # type: str + + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + self.url = url + self.version = version + kwargs.setdefault("sdk_moniker", "azureblobstorage/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_patch.py new file mode 100644 index 00000000000..f99e77fef98 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/_patch.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# This file is used for handwritten extensions to the generated code. 
Example: +# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md +def patch_sdk(): + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/__init__.py new file mode 100644 index 00000000000..f8feb32687a --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ServiceOperations", + "ContainerOperations", + "BlobOperations", + "PageBlobOperations", + "AppendBlobOperations", + "BlockBlobOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_append_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_append_blob_operations.py new file mode 100644 index 00000000000..734f1513296 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_append_blob_operations.py @@ -0,0 +1,725 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import models as _models +from ..._vendor import _convert_request +from ...operations._append_blob_operations import ( + build_append_block_from_url_request, + build_append_block_request, + build_create_request, + build_seal_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class AppendBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`append_blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. 
+ Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "AppendBlob". Note that overriding this default value may result in + unsupported behavior. + :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_create_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + 
blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + blob_type=blob_type, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def append_block( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: 
Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob. + The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to + AppendBlob. Append Block is supported only on version 2015-02-21 or later. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. Default value is None. + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "appendblock". Note that overriding this default value + may result in unsupported behavior.
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + _max_size = append_position_access_conditions.max_size + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = body + + request = build_append_block_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + lease_id=_lease_id, + max_size=_max_size, + append_position=_append_position, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.append_block.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + 
response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-append-offset"] = self._deserialize( + "str", response.headers.get("x-ms-blob-append-offset") + ) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def append_block_from_url( # pylint: disable=inconsistent-return-statements + self, + source_url: str, + content_length: int, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob + where the contents are read from a source url. The Append Block operation is permitted only if + the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on + version 2015-02-21 version or later. + + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param content_length: The length of the request. Required. + :type content_length: int + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. 
+ :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. Default value is None. + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "appendblock". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + _max_size = append_position_access_conditions.max_size + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_append_block_from_url_request( + url=self._config.url, + source_url=source_url, + content_length=content_length, + source_range=source_range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + max_size=_max_size, + append_position=_append_position, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + version=self._config.version, + template_url=self.append_block_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = 
self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-append-offset"] = self._deserialize( + "str", response.headers.get("x-ms-blob-append-offset") + ) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def seal( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on + version 2019-12-12 or later. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Parameter group. Default value is None.
+ :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword comp: comp. Default value is "seal". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "seal")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + + request = build_seal_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + append_position=_append_position, + comp=comp, + version=self._config.version, + template_url=self.seal.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore
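The generated AppendBlobOperations above are the low-level, AutoRest-produced layer; the extension reaches them through the vendored high-level BlobClient rather than calling them directly. A minimal sketch of that path, assuming the vendored package re-exports BlobClient and BlobType at its top level; the URL, SAS token, and file name below are hypothetical placeholders:

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
    BlobClient, BlobType)

# Hypothetical SAS-authenticated blob URL; from_blob_url parses the account,
# container, and blob name out of it.
blob_client = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/image.vhd?<sas-token>"
)
with open("image.vhd", "rb") as data:
    # With blob_type=BlobType.AppendBlob, upload_blob is expected to drive the
    # generated operations above: create() to make the blob, then append_block()
    # once per chunk of the stream.
    blob_client.upload_blob(data, blob_type=BlobType.AppendBlob, overwrite=True)

The aio module above backs the async client; its synchronous twin under operations/ (whose request builders it imports) serves the sync client in the same way.
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_blob_operations.py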
b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_blob_operations.py new file mode 100644 index 00000000000..92c70b4d5f8 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_blob_operations.py @@ -0,0 +1,3156 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, AsyncIterator, Callable, Dict, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._blob_operations import ( + build_abort_copy_from_url_request, + build_acquire_lease_request, + build_break_lease_request, + build_change_lease_request, + build_copy_from_url_request, + build_create_snapshot_request, + build_delete_immutability_policy_request, + build_delete_request, + build_download_request, + build_get_account_info_request, + build_get_properties_request, + build_get_tags_request, + build_query_request, + build_release_lease_request, + build_renew_lease_request, + build_set_expiry_request, + build_set_http_headers_request, + build_set_immutability_policy_request, + build_set_legal_hold_request, + build_set_metadata_request, + build_set_tags_request, + build_set_tier_request, + build_start_copy_from_url_request, + build_undelete_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class BlobOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`blob` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def download( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + range_get_content_crc64: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. Default value is None. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. Default value is None. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Async iterator of the response bytes or the result of cls(response) + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[AsyncIterator[bytes]] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_download_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + range=range, + lease_id=_lease_id, + range_get_content_md5=range_get_content_md5, + range_get_content_crc64=range_get_content_crc64, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + version=self._config.version, + template_url=self.download.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", 
response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", 
response.headers.get("x-ms-tag-count")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + 
response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + download.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_properties( # pylint: disable=inconsistent-return-statements + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. 
+ :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_properties_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress")) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-incremental-copy"] = self._deserialize( + "bool", response.headers.get("x-ms-incremental-copy") + ) + response_headers["x-ms-copy-destination-snapshot"] = self._deserialize( + "str", response.headers.get("x-ms-copy-destination-snapshot") + ) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition")) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["Accept-Ranges"] = self._deserialize("str", 
response.headers.get("Accept-Ranges")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier")) + response_headers["x-ms-access-tier-inferred"] = self._deserialize( + "bool", response.headers.get("x-ms-access-tier-inferred") + ) + response_headers["x-ms-archive-status"] = self._deserialize("str", response.headers.get("x-ms-archive-status")) + response_headers["x-ms-access-tier-change-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-access-tier-change-time") + ) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count")) + response_headers["x-ms-expiry-time"] = self._deserialize("rfc-1123", response.headers.get("x-ms-expiry-time")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-rehydrate-priority"] = self._deserialize( + "str", response.headers.get("x-ms-rehydrate-priority") + ) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + request_id_parameter: Optional[str] = None, + blob_delete_type: str = "Permanent", + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is + permanently removed from the storage account. If the storage account's soft delete feature is + enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for the number of days + specified by the DeleteRetentionPolicy section of [Storage service properties] + (Set-Blob-Service-Properties.md). 
After the specified number of days has passed, the blob's + data is permanently removed from the storage account. Note that you continue to be charged for + the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and + specify the "include=deleted" query parameter to discover which blobs and snapshots have been + soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other + operations on a soft-deleted blob or snapshot cause the service to return an HTTP status code + of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the + following two options: include: Delete the base blob and all of its snapshots. only: Delete + only the blob's snapshots and not the blob itself. Known values are: "include" and "only". + Default value is None. + :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to + permanently delete a blob if blob soft delete is enabled. Known values are "Permanent" and + None. Default value is "Permanent". + :type blob_delete_type: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None.
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_delete_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + lease_id=_lease_id, + delete_snapshots=delete_snapshots, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_delete_type=blob_delete_type, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def undelete( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. 
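For orientation, here is a minimal sketch of the soft-delete round trip that the delete and undelete operations implement, written against the public async client rather than this generated layer. It assumes the vendored package mirrors the azure.storage.blob surface; the account URL, container, blob name, and credential are placeholders.

import asyncio
from azure.storage.blob.aio import BlobClient  # the vendored copy is assumed to expose the same API

async def soft_delete_round_trip() -> None:
    # Placeholder names; soft delete must be enabled on the storage account.
    blob = BlobClient(
        account_url="https://myaccount.blob.core.windows.net",
        container_name="mycontainer",
        blob_name="example.txt",
        credential="<account-key-or-sas>",
    )
    async with blob:
        # Marks the blob and its snapshots for deletion; retained per DeleteRetentionPolicy.
        await blob.delete_blob(delete_snapshots="include")
        # While soft-deleted, ordinary reads return 404 (ResourceNotFound); undelete restores it.
        await blob.undelete_blob()

asyncio.run(soft_delete_round_trip())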
+ :type request_id_parameter: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_undelete_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.undelete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_expiry( # pylint: disable=inconsistent-return-statements + self, + expiry_options: Union[str, "_models.BlobExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Indicates mode of the expiry time. Known values are: + "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required. + :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expire. Default value is None. + :type expires_on: str + :keyword comp: comp. Default value is "expiry". Note that overriding this default value may + result in unsupported behavior.
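Blob expiry has no convenience wrapper on the public BlobClient (it applies only to accounts with a hierarchical namespace), so a usage sketch has to call this generated operation directly. `blob` is the async BlobClient from the sketch above; reaching the generated operations through the private `_client` attribute is an implementation detail and may change.

# Inside an async function, with `blob` as above (hierarchical-namespace account assumed):
await blob._client.blob.set_expiry(
    expiry_options="RelativeToNow",  # one of "NeverExpire", "RelativeToCreation", "RelativeToNow", "Absolute"
    expires_on="86400000",           # relative modes take milliseconds from now (here: 24 hours)
)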
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "expiry")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_set_expiry_request( + url=self._config.url, + expiry_options=expiry_options, + timeout=timeout, + request_id_parameter=request_id_parameter, + expires_on=expires_on, + comp=comp, + version=self._config.version, + template_url=self.set_expiry.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_http_headers( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. 
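As a usage sketch for the set_http_headers operation defined here: the public client bundles these system properties into a ContentSettings object. `blob` is the async BlobClient from the earlier sketch; the header values are examples only.

from azure.storage.blob import ContentSettings

# Inside an async function; fields left unset in ContentSettings are cleared, not preserved.
await blob.set_http_headers(
    content_settings=ContentSettings(
        content_type="application/json",
        cache_control="max-age=3600",
        content_language="en-US",
    )
)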
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_http_headers_request( + url=self._config.url, + timeout=timeout, + blob_cache_control=_blob_cache_control, + blob_content_type=_blob_content_type, + blob_content_md5=_blob_content_md5, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_content_disposition=_blob_content_disposition, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_http_headers.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = 
self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_immutability_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Immutability Policy operation sets the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "immutabilityPolicies". Note that overriding this default + value may result in unsupported behavior.
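A sketch of how the set_immutability_policy operation surfaces publicly, assuming the vendored package mirrors azure.storage.blob. `blob` is the async BlobClient from the earlier sketch, and the container is assumed to have version-level immutability support enabled.

import datetime
from azure.storage.blob import ImmutabilityPolicy

# Inside an async function: protect the blob from modification for seven days.
policy = ImmutabilityPolicy(
    expiry_time=datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7),
    policy_mode="Unlocked",  # "Unlocked" policies can still be shortened or deleted; "Locked" cannot
)
await blob.set_immutability_policy(immutability_policy=policy)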
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_immutability_policy_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + if_unmodified_since=_if_unmodified_since, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + comp=comp, + version=self._config.version, + template_url=self.set_immutability_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_immutability_policy.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def delete_immutability_policy( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "immutabilityPolicies". 
Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_delete_immutability_policy_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.delete_immutability_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete_immutability_policy.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_legal_hold( # pylint: disable=inconsistent-return-statements + self, legal_hold: bool, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """The Set Legal Hold operation sets a legal hold on the blob. + + :param legal_hold: Specifies whether a legal hold should be set on the blob. Required. + :type legal_hold: bool + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "legalhold". Note that overriding this default value may + result in unsupported behavior.
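The legal-hold and immutability-policy operations just defined pair up as follows on the public surface (a sketch; `blob` is the async BlobClient from the earlier sketch).

# Inside an async function. A legal hold is a simple on/off flag, independent of the policy:
await blob.set_legal_hold(True)          # block deletes/overwrites while the hold is on
await blob.delete_immutability_policy()  # only valid while the policy mode is "Unlocked"
await blob.set_legal_hold(False)         # release the hold so the blob can change again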
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "legalhold")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_set_legal_hold_request( + url=self._config.url, + legal_hold=legal_hold, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_legal_hold.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_legal_hold.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await 
self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. 
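For the set_metadata operation completed above, note that the service replaces the metadata set wholesale rather than merging it, so callers that want to add a single pair must read-modify-write. A sketch, with `blob` as in the earlier sketch:

# Inside an async function; metadata names must be valid C# identifiers.
props = await blob.get_blob_properties()
updated = dict(props.metadata or {})
updated["reviewed"] = "true"
await blob.set_blob_metadata(metadata=updated)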
+ :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def release_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: 
Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. 
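The lease verbs in this class (acquire, renew, change, release) are typically driven through the lease client that acquire_lease returns on the public surface. A sketch, with `blob` as in the earlier sketch:

import uuid

# Inside an async function; acquire_lease returns an async BlobLeaseClient.
lease = await blob.acquire_lease(lease_duration=15)  # 15-60 seconds, or -1 for an infinite lease
try:
    await lease.renew()                                       # restart the duration clock
    await lease.change(proposed_lease_id=str(uuid.uuid4()))   # rotate the lease ID
finally:
    await lease.release()                                     # free the blob for other writers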
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob 
for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Required. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + proposed_lease_id=proposed_lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, 
pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
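Breaking differs from releasing in that it does not require knowing the lease ID; publicly it is exposed on the lease client. A sketch, with `blob` as before:

from azure.storage.blob.aio import BlobLeaseClient

# Inside an async function: end whatever lease is currently held on the blob.
lease = BlobLeaseClient(client=blob)
remaining = await lease.break_lease(lease_break_period=0)  # 0 = break immediately
print(remaining)  # seconds left on the break period (0 when the blob is already free)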
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def create_snapshot( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: 
Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "snapshot". Note that overriding this default value may + result in unsupported behavior. 
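[Reviewer aside, continuing the sketches above: the public wrapper for this operation is ``BlobClient.create_snapshot``, which returns a dict carrying the new snapshot's opaque DateTime value:]

    from azure.storage.blob.aio import BlobClient

    async def snapshot_example(blob: BlobClient) -> str:
        # Metadata passed here belongs to the snapshot; omit it to inherit
        # the base blob's metadata, per the parameter description above.
        props = await blob.create_snapshot(metadata={"reason": "pre-copy"})
        return props["snapshot"]
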
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "snapshot")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_create_snapshot_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.create_snapshot.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", 
response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def start_copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + seal_blob: Optional[bool] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Start Copy From URL operation copies a blob or an internet resource to a new blob. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. Known values are: "High" and "Standard". Default value is None. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. 
Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. Service version + 2019-12-12 and newer. Default value is None. + :type seal_blob: bool + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_start_copy_from_url_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + tier=tier, + rehydrate_priority=rehydrate_priority, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + source_if_tags=_source_if_tags, + if_modified_since=_if_modified_since, + 
if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + seal_blob=seal_blob, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + version=self._config.version, + template_url=self.start_copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not + return a response until the copy is complete. + + :param copy_source: Specifies the name of the source page blob snapshot. 
This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param copy_source_tags: Optional, default 'replace'. Indicates if source tags should be + copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and + "COPY". Default value is None. + :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword x_ms_requires_sync: This header indicates that this is a synchronous Copy Blob From + URL instead of an Asynchronous Copy Blob. Default value is "true". Note that overriding this + default value may result in unsupported behavior. + :paramtype x_ms_requires_sync: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + x_ms_requires_sync = kwargs.pop("x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + _encryption_scope = None + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + + request = build_copy_from_url_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + tier=tier, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + source_content_md5=source_content_md5, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + copy_source_authorization=copy_source_authorization, + encryption_scope=_encryption_scope, + copy_source_tags=copy_source_tags, + x_ms_requires_sync=x_ms_requires_sync, + version=self._config.version, + template_url=self.copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await
self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def abort_copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a + destination blob with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + Blob operation. Required. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "copy". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword copy_action_abort_constant: Copy action. Default value is "abort". Note that + overriding this default value may result in unsupported behavior. 
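[Reviewer aside: the public wrappers for the three copy operations above are ``start_copy_from_url`` (the asynchronous, service-side copy), ``start_copy_from_url(..., requires_sync=True)`` (the synchronous Copy From URL) and ``abort_copy``. A sketch, assuming ``dest`` and ``source_url`` are set up as in the earlier examples:]

    from azure.storage.blob.aio import BlobClient

    async def copy_example(dest: BlobClient, source_url: str) -> None:
        copy = await dest.start_copy_from_url(source_url)
        props = await dest.get_blob_properties()
        if props.copy.status == "pending":
            # Aborting leaves a zero-length destination blob with full
            # metadata, as the Abort Copy From URL docstring notes.
            await dest.abort_copy(copy["copy_id"])
        # Synchronous copy: does not return until the copy completes.
        await dest.start_copy_from_url(source_url, requires_sync=True)
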
+ :paramtype copy_action_abort_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "copy")) # type: str + copy_action_abort_constant = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_abort_copy_from_url_request( + url=self._config.url, + copy_id=copy_id, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + comp=comp, + copy_action_abort_constant=copy_action_abort_constant, + version=self._config.version, + template_url=self.abort_copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_tier( # pylint: disable=inconsistent-return-statements + self, + tier: Union[str, "_models.AccessTierRequired"], + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a + premium storage account and on a block blob in a blob storage account (locally redundant + storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not + update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. 
Known values are: "P4", "P6", "P10", + "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and "Cold". + Required. + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. Known values are: "High" and "Standard". Default value is None. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "tier". Note that overriding this default value may + result in unsupported behavior. 
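[Reviewer aside: the public wrapper is ``set_standard_blob_tier`` for block blobs (or ``set_premium_page_blob_tier`` for premium page blobs). A sketch:]

    from azure.storage.blob.aio import BlobClient

    async def tier_example(blob: BlobClient) -> None:
        await blob.set_standard_blob_tier("Archive")
        # Rehydration from Archive accepts the priorities listed above.
        await blob.set_standard_blob_tier("Hot", rehydrate_priority="High")
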
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tier")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + + request = build_set_tier_request( + url=self._config.url, + tier=tier, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + rehydrate_priority=rehydrate_priority, + request_id_parameter=request_id_parameter, + lease_id=_lease_id, + if_tags=_if_tags, + comp=comp, + version=self._config.version, + template_url=self.set_tier.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if response.status_code == 202: + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
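[Reviewer aside: exposed publicly as ``get_account_information``, which works even on clients scoped to a single container or blob:]

    from azure.storage.blob.aio import BlobClient

    async def account_info_example(blob: BlobClient) -> None:
        info = await blob.get_account_information()
        print(info["sku_name"], info["account_kind"])
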
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def query( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + query_request: Optional[_models.QueryRequest] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """The Query operation enables users to select/project on blob data by providing simple query + expressions. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
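[Reviewer aside: to our knowledge the convenience wrapper for this operation, ``query_blob``, is exposed only on the synchronous ``BlobClient``, so this sketch uses the sync client:]

    from azure.storage.blob import BlobClient  # synchronous client

    def query_example(blob: BlobClient) -> bytes:
        # Project two columns from a CSV-formatted blob.
        reader = blob.query_blob("SELECT _1, _2 FROM BlobStorage")
        return reader.readall()
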
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param query_request: the query request. Default value is None. + :type query_request: ~azure.storage.blob.models.QueryRequest + :keyword comp: comp. Default value is "query". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Async iterator of the response bytes or the result of cls(response) + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "query")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[AsyncIterator[bytes]] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if query_request is not None: + _content = self._serialize.body(query_request, "QueryRequest", is_xml=True) + else: + _content = None + + request = build_query_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.query.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # 
type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") 
+ ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = 
self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + query.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_tags( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.BlobTags: + """The Get Tags operation enables users to get the tags associated with a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "tags". Note that overriding this default value may + result in unsupported behavior. 
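[Reviewer aside: publicly exposed as ``get_blob_tags``, returning a plain dict:]

    from typing import Dict
    from azure.storage.blob.aio import BlobClient

    async def get_tags_example(blob: BlobClient) -> Dict[str, str]:
        return await blob.get_blob_tags()
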
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.BlobTags] + + _if_tags = None + _lease_id = None + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_tags_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + snapshot=snapshot, + version_id=version_id, + if_tags=_if_tags, + lease_id=_lease_id, + comp=comp, + version=self._config.version, + template_url=self.get_tags.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("BlobTags", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_tags.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def set_tags( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + version_id: Optional[str] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + tags: Optional[_models.BlobTags] = None, + **kwargs: Any + ) -> None: + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. 
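[Reviewer aside: the matching public wrapper is ``set_blob_tags``; the tags written here are what ``get_blob_tags`` above returns, and the ``if_tags`` condition can guard against concurrent writers:]

    from azure.storage.blob.aio import BlobClient

    async def set_tags_example(blob: BlobClient) -> None:
        await blob.set_blob_tags({"project": "aosm", "stage": "published"})
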
+ :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param tags: Blob tags. Default value is None. + :type tags: ~azure.storage.blob.models.BlobTags + :keyword comp: comp. Default value is "tags". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_tags = None + _lease_id = None + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if tags is not None: + _content = self._serialize.body(tags, "BlobTags", is_xml=True) + else: + _content = None + + request = build_set_tags_request( + url=self._config.url, + timeout=timeout, + version_id=version_id, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + request_id_parameter=request_id_parameter, + if_tags=_if_tags, + lease_id=_lease_id, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_tags.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", 
response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_block_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_block_blob_operations.py new file mode 100644 index 00000000000..b3ac51917d5 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_block_blob_operations.py @@ -0,0 +1,1140 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._block_blob_operations import ( + build_commit_block_list_request, + build_get_block_list_request, + build_put_blob_from_url_request, + build_stage_block_from_url_request, + build_stage_block_request, + build_upload_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class BlockBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`block_blob` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def upload( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + transactional_content_crc64: Optional[bytes] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Block Blob operation updates the content of an existing block blob. Updating an + existing block blob overwrites any existing metadata on the blob. Partial updates are not + supported with Put Blob; the content of the existing blob is overwritten with the content of + the new blob. To perform a partial update of the content of a block blob, use the Put Block + List operation. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. 
+ :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "BlockBlob". Note that overriding this default value may result in + unsupported behavior. 
+ :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = body + + request = build_upload_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + transactional_content_crc64=transactional_content_crc64, + blob_type=blob_type, + content_type=content_type, + version=self._config.version, + content=_content, + 
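+            # Added note (illustrative commentary, not generated code): the
+            # caller-supplied stream in ``_content`` is forwarded unchanged as
+            # the HTTP body, so ``content_length`` must match the number of
+            # bytes the stream yields or the service rejects the request.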
template_url=self.upload.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def put_blob_from_url( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + copy_source: str, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + copy_source_blob_properties: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are + read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial + updates are not supported with Put Blob from URL; the content of an existing blob is + overwritten with the content of the new blob. 
To perform partial updates to a block blob’s + contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. + + :param content_length: The length of the request. Required. + :type content_length: int + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param copy_source_blob_properties: Optional, default is true. Indicates if properties from + the source blob should be copied. Default value is None. + :type copy_source_blob_properties: bool + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param copy_source_tags: Optional, default 'replace'. Indicates if source tags should be + copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and + "COPY". Default value is None. + :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. 
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "BlockBlob". Note that overriding this default value may result in + unsupported behavior. + :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + _source_if_unmodified_since = 
source_modified_access_conditions.source_if_unmodified_since + + request = build_put_blob_from_url_request( + url=self._config.url, + content_length=content_length, + copy_source=copy_source, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + source_if_tags=_source_if_tags, + request_id_parameter=request_id_parameter, + source_content_md5=source_content_md5, + blob_tags_string=blob_tags_string, + copy_source_blob_properties=copy_source_blob_properties, + copy_source_authorization=copy_source_authorization, + copy_source_tags=copy_source_tags, + blob_type=blob_type, + version=self._config.version, + template_url=self.put_blob_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + 
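+    # Illustrative sketch (added commentary, not generated code): callers
+    # typically drive a chunked upload through the async client's
+    # ``block_blob`` attribute by staging each chunk under a unique base64
+    # block id and then committing the ordered id list. ``client`` and
+    # ``chunks`` below are assumptions for illustration only:
+    #
+    #     import base64, uuid
+    #
+    #     async def upload_in_blocks(client, chunks):
+    #         block_ids = []
+    #         for chunk in chunks:
+    #             block_id = base64.b64encode(uuid.uuid4().hex.encode()).decode()
+    #             await client.block_blob.stage_block(block_id, len(chunk), chunk)
+    #             block_ids.append(block_id)
+    #         await client.block_blob.commit_block_list(
+    #             _models.BlockLookupList(latest=block_ids)
+    #         )
+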
@distributed_trace_async + async def stage_block( # pylint: disable=inconsistent-return-statements + self, + block_id: str, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. Required. + :type block_id: str + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword comp: comp. Default value is "block". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + _content = body + + request = build_stage_block_request( + url=self._config.url, + block_id=block_id, + content_length=content_length, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.stage_block.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", 
response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def stage_block_from_url( # pylint: disable=inconsistent-return-statements + self, + block_id: str, + content_length: int, + source_url: str, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. Required. + :type block_id: str + :param content_length: The length of the request. Required. + :type content_length: int + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "block". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_stage_block_from_url_request( + url=self._config.url, + block_id=block_id, + content_length=content_length, + source_url=source_url, + source_range=source_range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + version=self._config.version, + template_url=self.stage_block_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = 
self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def commit_block_list( # pylint: disable=inconsistent-return-statements + self, + blocks: _models.BlockLookupList, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Commit Block List operation writes a blob by specifying the list of block IDs that make up + the blob. In order to be written as part of a blob, a block must have been successfully written + to the server in a prior Put Block operation. You can call Put Block List to update a blob by + uploading only those blocks that have changed, then committing the new and existing blocks + together. You can do this by specifying whether to commit a block from the committed block list + or from the uncommitted block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: Blob Blocks. Required. + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "blocklist". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = self._serialize.body(blocks, "BlockLookupList", is_xml=True) + + request = build_commit_block_list_request( + url=self._config.url, + timeout=timeout, + blob_cache_control=_blob_cache_control, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + comp=comp, + content_type=content_type, + version=self._config.version, + 
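+            # Added note (illustrative commentary, not generated code):
+            # ``_content`` holds the ``BlockLookupList`` serialized to XML
+            # above; it is sent as the request body with the
+            # ``application/xml`` content type resolved earlier.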
content=_content, + template_url=self.commit_block_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_block_list( + self, + snapshot: Optional[str] = None, + list_type: Union[str, "_models.BlockListType"] = "committed", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.BlockList: + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. Known values are: "committed", "uncommitted", and + "all". Default value is "committed". + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "blocklist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.BlockList] + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + + request = build_get_block_list_request( + url=self._config.url, + snapshot=snapshot, + list_type=list_type, + timeout=timeout, + lease_id=_lease_id, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.get_block_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("BlockList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) + + return deserialized + + get_block_list.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_container_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_container_operations.py new file mode 100644 index 00000000000..6326516ba7f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_container_operations.py @@ -0,0 +1,1868 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._container_operations import ( + build_acquire_lease_request, + build_break_lease_request, + build_change_lease_request, + build_create_request, + build_delete_request, + build_filter_blobs_request, + build_get_access_policy_request, + build_get_account_info_request, + build_get_properties_request, + build_list_blob_flat_segment_request, + build_list_blob_hierarchy_segment_request, + build_release_lease_request, + build_rename_request, + build_renew_lease_request, + build_restore_request, + build_set_access_policy_request, + build_set_metadata_request, + build_submit_batch_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ContainerOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`container` attribute. 
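+
+    Illustrative note (added commentary, not part of the generated
+    docstring): ``await client.container.create()`` creates the container
+    the client is scoped to and raises
+    :class:`~azure.core.exceptions.ResourceExistsError` via the error map
+    if it already exists.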
+ """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + container_cpk_scope_info: Optional[_models.ContainerCpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. Known values are: "container" and "blob". Default value is None. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param container_cpk_scope_info: Parameter group. Default value is None. + :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _default_encryption_scope = None + _prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + _default_encryption_scope = container_cpk_scope_info.default_encryption_scope + _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + access=access, + request_id_parameter=request_id_parameter, + default_encryption_scope=_default_encryption_scope, + prevent_encryption_scope_override=_prevent_encryption_scope_override, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def get_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-public-access"] = self._deserialize( + "str", response.headers.get("x-ms-blob-public-access") + ) + response_headers["x-ms-has-immutability-policy"] = self._deserialize( + "bool", response.headers.get("x-ms-has-immutability-policy") + ) + 
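+        # Each raw HTTP response header is converted to a typed value here: the
+        # "rfc-1123" deserializer produces a datetime, "bool" a boolean, and "{str}"
+        # gathers the x-ms-meta-* headers above into a name/value dict.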
response_headers["x-ms-has-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-has-legal-hold")) + response_headers["x-ms-default-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-default-encryption-scope") + ) + response_headers["x-ms-deny-encryption-scope-override"] = self._deserialize( + "bool", response.headers.get("x-ms-deny-encryption-scope-override") + ) + response_headers["x-ms-immutable-storage-with-versioning-enabled"] = self._deserialize( + "bool", response.headers.get("x-ms-immutable-storage-with-versioning-enabled") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. 
If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + metadata=metadata, + if_modified_since=_if_modified_since, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + 
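+        # A successful metadata write changes the container's ETag and Last-Modified
+        # values, which are surfaced to the caller via the headers parsed above.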
response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def get_access_policy( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> List[_models.SignedIdentifier]: + """gets the permissions for the specified container. The permissions indicate whether container + data may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[List[_models.SignedIdentifier]] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = 
{} + response_headers["x-ms-blob-public-access"] = self._deserialize( + "str", response.headers.get("x-ms-blob-public-access") + ) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("[SignedIdentifier]", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_access_policy.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def set_access_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + container_acl: Optional[List[_models.SignedIdentifier]] = None, + **kwargs: Any + ) -> None: + """sets the permissions for the specified container. The permissions indicate whether blobs in a + container may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. Known values are: "container" and "blob". Default value is None. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param container_acl: the acls for the container. Default value is None. + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True, "itemsName": "SignedIdentifier"}} + if container_acl is not None: + _content = self._serialize.body( + container_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt + ) + else: + _content = None + + request = build_set_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + access=access, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def restore( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_container_name: Optional[str] = None, + 
deleted_container_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of + the deleted container to restore. Default value is None. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the + version of the deleted container to restore. Default value is None. + :type deleted_container_version: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_restore_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + deleted_container_name=deleted_container_name, + deleted_container_version=deleted_container_version, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.restore.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def rename( # 
pylint: disable=inconsistent-return-statements
+        self,
+        source_container_name: str,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        source_lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """Renames an existing container.
+
+        :param source_container_name: Specifies the name of the container to rename. Required.
+        :type source_container_name: str
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting
+         Timeouts for Blob Service Operations.`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param source_lease_id: A lease ID for the source path. If specified, the source path must have
+         an active lease and the lease ID must match. Default value is None.
+        :type source_lease_id: str
+        :keyword restype: restype. Default value is "container". Note that overriding this default
+         value may result in unsupported behavior.
+        :paramtype restype: str
+        :keyword comp: comp. Default value is "rename". Note that overriding this default value may
+         result in unsupported behavior.
+        :paramtype comp: str
+        :keyword callable cls: A custom type or function that will be passed the direct response
+        :return: None or the result of cls(response)
+        :rtype: None
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
+
+        restype = kwargs.pop("restype", _params.pop("restype", "container"))  # type: str
+        comp = kwargs.pop("comp", _params.pop("comp", "rename"))  # type: str
+        cls = kwargs.pop("cls", None)  # type: ClsType[None]
+
+        request = build_rename_request(
+            url=self._config.url,
+            source_container_name=source_container_name,
+            timeout=timeout,
+            request_id_parameter=request_id_parameter,
+            source_lease_id=source_lease_id,
+            restype=restype,
+            comp=comp,
+            version=self._config.version,
+            template_url=self.rename.metadata["url"],
+            headers=_headers,
+            params=_params,
+        )
+        request = _convert_request(request)
+        request.url = self._client.format_url(request.url)  # type: ignore
+
+        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            request, stream=False, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    rename.metadata = {"url": "{url}/{containerName}"}  # type:
ignore + + @distributed_trace_async + async def submit_batch( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "batch". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Async iterator of the response bytes or the result of cls(response) + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", "application/xml") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[AsyncIterator[bytes]] + + _content = body + + request = build_submit_batch_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + multipart_content_type=multipart_content_type, + version=self._config.version, + content=_content, + template_url=self.submit_batch.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, 
deserialized, response_headers)
+
+        return deserialized
+
+    submit_batch.metadata = {"url": "{url}/{containerName}"}  # type: ignore
+
+    @distributed_trace_async
+    async def filter_blobs(
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        where: Optional[str] = None,
+        marker: Optional[str] = None,
+        maxresults: Optional[int] = None,
+        include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None,
+        **kwargs: Any
+    ) -> _models.FilterBlobSegment:
+        """The Filter Blobs operation enables callers to list blobs in a container whose tags match a
+        given search expression. Filter blobs searches within the given container.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting
+         Timeouts for Blob Service Operations.`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+         value is None.
+        :type request_id_parameter: str
+        :param where: Filters the results to return only blobs whose tags match the specified
+         expression. Default value is None.
+        :type where: str
+        :param marker: A string value that identifies the portion of the list of containers to be
+         returned with the next listing operation. The operation returns the NextMarker value within the
+         response body if the listing operation did not return all containers remaining to be listed
+         with the current page. The NextMarker value can be used as the value for the marker parameter
+         in a subsequent call to request the next page of list items. The marker value is opaque to the
+         client. Default value is None.
+        :type marker: str
+        :param maxresults: Specifies the maximum number of containers to return. If the request does
+         not specify maxresults, or specifies a value greater than 5000, the server will return up to
+         5000 items. Note that if the listing operation crosses a partition boundary, then the service
+         will return a continuation token for retrieving the remainder of the results. For this reason,
+         it is possible that the service will return fewer results than specified by maxresults, or than
+         the default of 5000. Default value is None.
+        :type maxresults: int
+        :param include: Include this parameter to specify one or more datasets to include in the
+         response. Default value is None.
+        :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+        :keyword restype: restype. Default value is "container". Note that overriding this default
+         value may result in unsupported behavior.
+        :paramtype restype: str
+        :keyword comp: comp. Default value is "blobs". Note that overriding this default value may
+         result in unsupported behavior.
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.FilterBlobSegment] + + request = build_filter_blobs_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + where=where, + marker=marker, + maxresults=maxresults, + include=include, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.filter_blobs.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("FilterBlobSegment", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + filter_blobs.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. 
The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + 
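+        # x-ms-lease-id (parsed above) is the ID of the newly acquired lease; callers
+        # pass it back via LeaseAccessConditions.lease_id on subsequent lease-protected
+        # operations such as delete.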
response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def release_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + 
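+        # The trailing x-ms-version and Date headers record the service version that
+        # handled the request and the service's response time, as on every operation.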
response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. 
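change_lease is the one verb that takes both the current and a proposed lease ID. A hedged sketch under the same `client.container` assumption as above (`rotate_lease_id` is a hypothetical helper):

import uuid
from typing import Any

async def rotate_lease_id(client: Any, current_lease_id: str) -> str:
    # The service rejects a proposed id that is not a well-formed GUID with
    # 400 (Invalid request), so generate one rather than hand-rolling it.
    proposed = str(uuid.uuid4())
    await client.container.change_lease(
        lease_id=current_lease_id,
        proposed_lease_id=proposed,
    )
    return proposed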
+ :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Required. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + proposed_lease_id=proposed_lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = 
self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def list_blob_flat_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListBlobsFlatSegmentResponse: + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsFlatSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListBlobsFlatSegmentResponse] + + request = build_list_blob_flat_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_blob_flat_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListBlobsFlatSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_blob_flat_segment.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def list_blob_hierarchy_segment( + self, + delimiter: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListBlobsHierarchySegmentResponse: + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose names begin with + the same substring up to the appearance of the delimiter character. The delimiter may be a + single character or a string. Required. 
+ :type delimiter: str + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. 
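With a delimiter, the hierarchy listing collapses shared name prefixes into BlobPrefix placeholders, which is how the flat blob namespace simulates directories. A sketch assuming the generated `segment.blob_prefixes` / `segment.blob_items` model attributes:

from typing import Any, List, Tuple

async def list_one_level(client: Any, prefix: str) -> Tuple[List[str], List[str]]:
    # Names sharing `prefix` up to the next "/" come back as BlobPrefix
    # placeholders; everything else in this "directory" comes back as blobs.
    resp = await client.container.list_blob_hierarchy_segment(
        delimiter="/", prefix=prefix
    )
    subdirs = [p.name for p in (resp.segment.blob_prefixes or [])]
    blobs = [b.name for b in (resp.segment.blob_items or [])]
    return subdirs, blobs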
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListBlobsHierarchySegmentResponse] + + request = build_list_blob_hierarchy_segment_request( + url=self._config.url, + delimiter=delimiter, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_blob_hierarchy_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_blob_hierarchy_segment.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace_async + async def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
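Because get_account_info deserializes no body, the SKU and account kind travel only in response headers; the `cls` hook, which the operation invokes as `cls(pipeline_response, None, response_headers)`, is how a caller gets them back. A sketch (helper name illustrative, same `client.container` assumption):

from typing import Any, Dict

async def fetch_sku_and_kind(client: Any) -> Dict[str, str]:
    # Whatever the cls callback returns becomes the coroutine's result, so it
    # can lift the two interesting headers out of the otherwise-None response.
    def _pick(pipeline_response: Any, deserialized: Any, headers: Dict[str, Any]) -> Dict[str, str]:
        return {"sku": headers["x-ms-sku-name"], "kind": headers["x-ms-account-kind"]}

    return await client.container.get_account_info(cls=_pick)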
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}/{containerName}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_page_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_page_blob_operations.py new file mode 100644 index 00000000000..2ef6ac25985 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_page_blob_operations.py @@ -0,0 +1,1478 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._page_blob_operations import ( + build_clear_pages_request, + build_copy_incremental_request, + build_create_request, + build_get_page_ranges_diff_request, + build_get_page_ranges_request, + build_resize_request, + build_update_sequence_number_request, + build_upload_pages_from_url_request, + build_upload_pages_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class PageBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`page_blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + blob_content_length: int, + timeout: Optional[int] = None, + tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, + metadata: Optional[Dict[str, str]] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create operation creates a new page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. Required. + :type blob_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. Known values are: "P4", + "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", and "P80". Default value is None. 
+ :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. Default value is 0. + :type blob_sequence_number: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "PageBlob". Note that overriding this default value may result in + unsupported behavior. 
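Creation reserves capacity without sending data, and blob_content_length must land on a 512-byte boundary. A sketch that rounds up rather than letting the request fail, assuming `client.page_blob` per the class docstring (`create_page_blob` is a hypothetical helper):

from typing import Any

PAGE_SIZE = 512

async def create_page_blob(client: Any, size_bytes: int) -> None:
    # Round the requested capacity up to the next 512-byte boundary; the
    # service rejects an unaligned blob_content_length outright.
    aligned = -(-size_bytes // PAGE_SIZE) * PAGE_SIZE
    await client.page_blob.create(
        content_length=0,             # creation sends no request body
        blob_content_length=aligned,  # total capacity reserved for the blob
    )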
+ :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_create_request( + url=self._config.url, + content_length=content_length, + blob_content_length=blob_content_length, + timeout=timeout, + tier=tier, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_sequence_number=blob_sequence_number, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + blob_type=blob_type, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response 
= await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def upload_pages( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. 
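An upload must cover whole pages, with Content-Length matching the Range header exactly. A sketch writing the first page, under the same `client.page_blob` assumption:

import io
from typing import Any

async def write_first_page(client: Any, payload: bytes) -> None:
    # Pad a short payload to exactly one page: Content-Length and the Range
    # header must describe the same 512-aligned span or the update is refused.
    assert len(payload) <= 512
    padded = payload.ljust(512, b"\x00")
    await client.page_blob.upload_pages(
        content_length=len(padded),
        body=io.BytesIO(padded),
        range="bytes=0-511",
    )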
+ :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "update". Note that + overriding this default value may result in unsupported behavior. + :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = 
sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_less_than_or_equal_to = ( + sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + ) + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = body + + request = build_upload_pages_request( + url=self._config.url, + content_length=content_length, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + timeout=timeout, + range=range, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, + if_sequence_number_less_than=_if_sequence_number_less_than, + if_sequence_number_equal_to=_if_sequence_number_equal_to, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + page_write=page_write, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.upload_pages.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", 
response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def clear_pages( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "clear". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_less_than_or_equal_to = ( + sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + ) + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_clear_pages_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + range=range, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, + if_sequence_number_less_than=_if_sequence_number_less_than, + if_sequence_number_equal_to=_if_sequence_number_equal_to, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + page_write=page_write, + version=self._config.version, + template_url=self.clear_pages.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def upload_pages_from_url( # pylint: disable=inconsistent-return-statements + self, + source_url: str, + source_range: str, + content_length: int, + range: str, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. Required. + :type source_range: str + :param content_length: The length of the request. Required. + :type content_length: int + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. Required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "update". Note that + overriding this default value may result in unsupported behavior. 
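For the from-URL variant, both ranges must be 512-aligned with explicit end offsets and describe the same number of bytes. A sketch under the same `client.page_blob` assumption; the `content_length=0` here assumes this operation follows the usual Put-*-From-URL convention of an empty request body, with the copied length implied by the ranges:

from typing import Any

async def copy_first_page(client: Any, source_url: str) -> None:
    # Server-side copy of one page: the service reads bytes 0-511 from
    # source_url and writes them to the matching destination range.
    await client.page_blob.upload_pages_from_url(
        source_url=source_url,
        source_range="bytes=0-511",
        content_length=0,
        range="bytes=0-511",
    )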
+ :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_less_than_or_equal_to = ( + sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + ) + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_upload_pages_from_url_request( + url=self._config.url, + source_url=source_url, + source_range=source_range, + content_length=content_length, + range=range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, + if_sequence_number_less_than=_if_sequence_number_less_than, + if_sequence_number_equal_to=_if_sequence_number_equal_to, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + 
if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + page_write=page_write, + version=self._config.version, + template_url=self.upload_pages_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_page_ranges( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.PageList: + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may + result in unsupported behavior. 
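As a usage sketch (same caveats as above: hypothetical vendored aio import path and a placeholder URL), the high-level client wraps this call and returns the valid and cleared ranges as two lists of start/end byte offsets::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import BlobClient

    async def show_valid_ranges() -> None:
        blob = BlobClient.from_blob_url(
            "https://myacct.blob.core.windows.net/vhds/disk.vhd?<sas>")
        # Each entry is a dict with inclusive 'start' and 'end' offsets.
        valid, _cleared = await blob.get_page_ranges()
        for r in valid:
            print(f"pages {r['start']}-{r['end']}")
        await blob.close()

    asyncio.run(show_valid_ranges())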
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.PageList] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_page_ranges_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + range=range, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + marker=marker, + maxresults=maxresults, + comp=comp, + version=self._config.version, + template_url=self.get_page_ranges.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("PageList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_page_ranges.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def get_page_ranges_diff( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + prevsnapshot: Optional[str] = None, 
+ prev_snapshot_url: Optional[str] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.PageList: + """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that + were changed between target blob and previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a + DateTime value that specifies that the response will contain only pages that were changed + between target blob and previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is + the older of the two. Note that incremental snapshots are currently supported only for blobs + created on or after January 1, 2016. Default value is None. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in service versions + 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The + response will only contain pages that were changed between the target blob and its previous + snapshot. Default value is None. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param lease_access_conditions: Parameter group. Default value is None. 
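A hedged sketch of the diff variant: the high-level client exposes it through the previous_snapshot_diff argument of get_page_ranges (import path and names are assumptions, as before)::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import BlobClient

    async def pages_changed_since_snapshot() -> None:
        blob = BlobClient.from_blob_url(
            "https://myacct.blob.core.windows.net/vhds/disk.vhd?<sas>")
        snap = await blob.create_snapshot()  # returns {'snapshot': <DateTime>, ...}
        # ... writes to the blob happen here ...
        changed, cleared = await blob.get_page_ranges(
            previous_snapshot_diff=snap["snapshot"])
        print(f"{len(changed)} updated range(s), {len(cleared)} cleared range(s)")
        await blob.close()

    asyncio.run(pages_changed_since_snapshot())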
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.PageList] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_page_ranges_diff_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + prevsnapshot=prevsnapshot, + prev_snapshot_url=prev_snapshot_url, + range=range, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + marker=marker, + maxresults=maxresults, + comp=comp, + version=self._config.version, + template_url=self.get_page_ranges_diff.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + + deserialized = self._deserialize("PageList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_page_ranges_diff.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def resize( # pylint: disable=inconsistent-return-statements + self, + blob_content_length: int, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. Required. + :type blob_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
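A minimal sketch of the resize call via the high-level client (vendored aio import path assumed); the new size must be 512-byte aligned, matching the blob_content_length constraint above::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import BlobClient

    async def grow_page_blob() -> None:
        blob = BlobClient.from_blob_url(
            "https://myacct.blob.core.windows.net/vhds/disk.vhd?<sas>")
        await blob.resize_blob(size=1024 * 1024)  # 1 MiB, a multiple of 512
        await blob.close()

    asyncio.run(grow_page_blob())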
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_resize_request( + url=self._config.url, + blob_content_length=blob_content_length, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.resize.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + resize.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def update_sequence_number( # pylint: disable=inconsistent-return-statements + self, + sequence_number_action: Union[str, "_models.SequenceNumberActionType"], + timeout: Optional[int] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the + request. This property applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. Known values are: "max", "update", and "increment". + Required. + :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. Default value is 0. + :type blob_sequence_number: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
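A sketch of the sequence-number update, which the high-level client surfaces as set_sequence_number (same hypothetical import path): "increment" needs no explicit number, while "max" and "update" take one::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import BlobClient

    async def bump_sequence_number() -> None:
        blob = BlobClient.from_blob_url(
            "https://myacct.blob.core.windows.net/vhds/disk.vhd?<sas>")
        # Returns the blob-updated property dict (etag, last modified, ...).
        props = await blob.set_sequence_number("increment")
        print(props["etag"])
        await blob.close()

    asyncio.run(bump_sequence_number())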
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_update_sequence_number_request( + url=self._config.url, + sequence_number_action=sequence_number_action, + timeout=timeout, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_sequence_number=blob_sequence_number, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.update_sequence_number.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + update_sequence_number.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace_async + async def copy_incremental( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: 
Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Copy Incremental operation copies a snapshot of the source page blob to a destination page + blob. The snapshot is copied such that only the differential changes between the previously + copied snapshot are transferred to the destination. The copied snapshots are complete copies of + the original snapshot and can be read or copied from as usual. This API is supported since REST + version 2016-05-31. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "incrementalcopy". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "incrementalcopy")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_copy_incremental_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.copy_incremental.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_patch.py new file mode 100644 index 00000000000..029b47fe478 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_patch.py @@ -0,0 +1,23 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import List +__all__ = [] # type: List[str] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_service_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_service_operations.py new file mode 100644 index 00000000000..fd98fdac1f3 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/aio/operations/_service_operations.py @@ -0,0 +1,752 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +from typing import Any, AsyncIterator, Callable, Dict, IO, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._service_operations import ( + build_filter_blobs_request, + build_get_account_info_request, + build_get_properties_request, + build_get_statistics_request, + build_get_user_delegation_key_request, + build_list_containers_segment_request, + build_set_properties_request, + build_submit_batch_request, +) + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ServiceOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.aio.AzureBlobStorage`'s + :attr:`service` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def set_properties( # pylint: disable=inconsistent-return-statements + self, + storage_service_properties: _models.StorageServiceProperties, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. Required. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
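Moving to the service-level operations in this new file: a hedged sketch of setting service properties through the high-level BlobServiceClient. The model classes and import paths are assumed to mirror azure-storage-blob, and the account key is a placeholder::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
        BlobAnalyticsLogging, RetentionPolicy)
    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import (
        BlobServiceClient)

    async def enable_blob_logging() -> None:
        svc = BlobServiceClient(
            "https://myacct.blob.core.windows.net", credential="<account-key>")
        logging = BlobAnalyticsLogging(
            read=True, write=True, delete=True,
            retention_policy=RetentionPolicy(enabled=True, days=7))
        await svc.set_service_properties(analytics_logging=logging)
        await svc.close()

    asyncio.run(enable_blob_logging())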
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def get_properties( + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> _models.StorageServiceProperties: + """gets the properties of a storage account's Blob service, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
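And the read side, under the same assumptions; the high-level call returns a plain dict of the configured settings::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import (
        BlobServiceClient)

    async def show_service_properties() -> None:
        svc = BlobServiceClient(
            "https://myacct.blob.core.windows.net", credential="<account-key>")
        props = await svc.get_service_properties()
        # For example, inspect the configured CORS rules.
        print(len(props["cors"]), "CORS rule(s) configured")
        await svc.close()

    asyncio.run(show_service_properties())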
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.StorageServiceProperties] + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("StorageServiceProperties", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_properties.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def get_statistics( + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> _models.StorageServiceStats: + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "stats". Note that overriding this default value may + result in unsupported behavior. 
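A sketch of the stats call; note it only succeeds against the secondary endpoint of a read-access geo-redundant account, so the -secondary host below is part of the example's assumptions::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import (
        BlobServiceClient)

    async def show_replication_status() -> None:
        svc = BlobServiceClient(
            "https://myacct-secondary.blob.core.windows.net",
            credential="<account-key>")
        stats = await svc.get_service_stats()
        geo = stats["geo_replication"]
        print(geo["status"], geo["last_sync_time"])
        await svc.close()

    asyncio.run(show_replication_status())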
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "stats")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.StorageServiceStats] + + request = build_get_statistics_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_statistics.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("StorageServiceStats", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_statistics.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def list_containers_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListContainersSegmentResponse: + """The List Containers Segment operation returns a list of the containers under the specified + account. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. 
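Callers rarely need to manage the marker and maxresults continuation parameters by hand: the high-level client pages for you, as in this sketch (import path and name prefix are illustrative)::

    import asyncio

    from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import (
        BlobServiceClient)

    async def list_matching_containers() -> None:
        svc = BlobServiceClient(
            "https://myacct.blob.core.windows.net", credential="<account-key>")
        # The async pager follows NextMarker continuation tokens internally.
        async for container in svc.list_containers(name_starts_with="aosm"):
            print(container.name)
        await svc.close()

    asyncio.run(list_matching_containers())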
+ :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListContainersSegmentResponse] + + request = build_list_containers_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.list_containers_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("ListContainersSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) + + return deserialized + + list_containers_segment.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def get_user_delegation_key( + self, + key_info: _models.KeyInfo, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.UserDelegationKey: + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. + + :param key_info: Key information. Required. + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "userdelegationkey". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "userdelegationkey")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.UserDelegationKey] + + _content = self._serialize.body(key_info, "KeyInfo", is_xml=True) + + request = build_get_user_delegation_key_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.get_user_delegation_key.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = 
self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("UserDelegationKey", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_user_delegation_key.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def submit_batch( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """The Batch operation allows 
multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "batch". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Async iterator of the response bytes or the result of cls(response) + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", "application/xml") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[AsyncIterator[bytes]] + + _content = body + + request = build_submit_batch_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + multipart_content_type=multipart_content_type, + version=self._config.version, + content=_content, + template_url=self.submit_batch.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + submit_batch.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace_async + async def filter_blobs( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None, + **kwargs: Any + ) -> _models.FilterBlobSegment: + """The Filter Blobs operation enables 
callers to list blobs across all containers whose tags match
+ a given search expression. Filter blobs searches across all containers within a storage
+ account but can be scoped within the expression to a single container.
+
+ :param timeout: The timeout parameter is expressed in seconds. For more information, see
+ Setting Timeouts for Blob Service Operations. Default value is None.
+ :type timeout: int
+ :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+ limit that is recorded in the analytics logs when storage analytics logging is enabled. Default
+ value is None.
+ :type request_id_parameter: str
+ :param where: Filters the results to return only blobs whose tags match the specified
+ expression. Default value is None.
+ :type where: str
+ :param marker: A string value that identifies the portion of the list of results to be
+ returned with the next listing operation. The operation returns the NextMarker value within the
+ response body if the listing operation did not return all results remaining to be listed
+ with the current page. The NextMarker value can be used as the value for the marker parameter
+ in a subsequent call to request the next page of list items. The marker value is opaque to the
+ client. Default value is None.
+ :type marker: str
+ :param maxresults: Specifies the maximum number of results to return. If the request does
+ not specify maxresults, or specifies a value greater than 5000, the server will return up to
+ 5000 items. Note that if the listing operation crosses a partition boundary, then the service
+ will return a continuation token for retrieving the remainder of the results. For this reason,
+ it is possible that the service will return fewer results than specified by maxresults, or than
+ the default of 5000. Default value is None.
+ :type maxresults: int
+ :param include: Include this parameter to specify one or more datasets to include in the
+ response. Default value is None.
+ :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem]
+ :keyword comp: comp. Default value is "blobs". Note that overriding this default value may
+ result in unsupported behavior.
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.FilterBlobSegment] + + request = build_filter_blobs_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + where=where, + marker=marker, + maxresults=maxresults, + include=include, + comp=comp, + version=self._config.version, + template_url=self.filter_blobs.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("FilterBlobSegment", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + filter_blobs.metadata = {"url": "{url}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/__init__.py new file mode 100644 index 00000000000..ffa0865eed4 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/__init__.py @@ -0,0 +1,173 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
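+#
+# Illustrative sketch (not part of the generated code): these models back the
+# vendored blob client used by the AOSM extension. A minimal example of direct
+# use, assuming the vendored package mirrors azure-storage-blob's public
+# surface; the account, container, blob, and SAS values are hypothetical:
+#
+#   from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
+#       BlobClient, BlobType)
+#   # Build a client straight from a SAS URL, then upload a 512-byte page blob.
+#   client = BlobClient.from_blob_url("https://acct.blob.core.windows.net/c/b?<sas>")
+#   client.upload_blob(b"\0" * 512, blob_type=BlobType.PAGE_BLOB, overwrite=True)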
+# -------------------------------------------------------------------------- + +from ._models_py3 import AccessPolicy +from ._models_py3 import AppendPositionAccessConditions +from ._models_py3 import ArrowConfiguration +from ._models_py3 import ArrowField +from ._models_py3 import BlobFlatListSegment +from ._models_py3 import BlobHTTPHeaders +from ._models_py3 import BlobHierarchyListSegment +from ._models_py3 import BlobItemInternal +from ._models_py3 import BlobMetadata +from ._models_py3 import BlobName +from ._models_py3 import BlobPrefix +from ._models_py3 import BlobPropertiesInternal +from ._models_py3 import BlobTag +from ._models_py3 import BlobTags +from ._models_py3 import Block +from ._models_py3 import BlockList +from ._models_py3 import BlockLookupList +from ._models_py3 import ClearRange +from ._models_py3 import ContainerCpkScopeInfo +from ._models_py3 import ContainerItem +from ._models_py3 import ContainerProperties +from ._models_py3 import CorsRule +from ._models_py3 import CpkInfo +from ._models_py3 import CpkScopeInfo +from ._models_py3 import DelimitedTextConfiguration +from ._models_py3 import FilterBlobItem +from ._models_py3 import FilterBlobSegment +from ._models_py3 import GeoReplication +from ._models_py3 import JsonTextConfiguration +from ._models_py3 import KeyInfo +from ._models_py3 import LeaseAccessConditions +from ._models_py3 import ListBlobsFlatSegmentResponse +from ._models_py3 import ListBlobsHierarchySegmentResponse +from ._models_py3 import ListContainersSegmentResponse +from ._models_py3 import Logging +from ._models_py3 import Metrics +from ._models_py3 import ModifiedAccessConditions +from ._models_py3 import PageList +from ._models_py3 import PageRange +from ._models_py3 import QueryFormat +from ._models_py3 import QueryRequest +from ._models_py3 import QuerySerialization +from ._models_py3 import RetentionPolicy +from ._models_py3 import SequenceNumberAccessConditions +from ._models_py3 import SignedIdentifier +from ._models_py3 import SourceModifiedAccessConditions +from ._models_py3 import StaticWebsite +from ._models_py3 import StorageError +from ._models_py3 import StorageServiceProperties +from ._models_py3 import StorageServiceStats +from ._models_py3 import UserDelegationKey + +from ._azure_blob_storage_enums import AccessTier +from ._azure_blob_storage_enums import AccessTierOptional +from ._azure_blob_storage_enums import AccessTierRequired +from ._azure_blob_storage_enums import AccountKind +from ._azure_blob_storage_enums import ArchiveStatus +from ._azure_blob_storage_enums import BlobCopySourceTags +from ._azure_blob_storage_enums import BlobExpiryOptions +from ._azure_blob_storage_enums import BlobImmutabilityPolicyMode +from ._azure_blob_storage_enums import BlobType +from ._azure_blob_storage_enums import BlockListType +from ._azure_blob_storage_enums import CopyStatusType +from ._azure_blob_storage_enums import DeleteSnapshotsOptionType +from ._azure_blob_storage_enums import EncryptionAlgorithmType +from ._azure_blob_storage_enums import FilterBlobsIncludeItem +from ._azure_blob_storage_enums import GeoReplicationStatusType +from ._azure_blob_storage_enums import LeaseDurationType +from ._azure_blob_storage_enums import LeaseStateType +from ._azure_blob_storage_enums import LeaseStatusType +from ._azure_blob_storage_enums import ListBlobsIncludeItem +from ._azure_blob_storage_enums import ListContainersIncludeType +from ._azure_blob_storage_enums import PremiumPageBlobAccessTier +from ._azure_blob_storage_enums import 
PublicAccessType +from ._azure_blob_storage_enums import QueryFormatType +from ._azure_blob_storage_enums import RehydratePriority +from ._azure_blob_storage_enums import SequenceNumberActionType +from ._azure_blob_storage_enums import SkuName +from ._azure_blob_storage_enums import StorageErrorCode +from ._patch import __all__ as _patch_all +from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AccessPolicy", + "AppendPositionAccessConditions", + "ArrowConfiguration", + "ArrowField", + "BlobFlatListSegment", + "BlobHTTPHeaders", + "BlobHierarchyListSegment", + "BlobItemInternal", + "BlobMetadata", + "BlobName", + "BlobPrefix", + "BlobPropertiesInternal", + "BlobTag", + "BlobTags", + "Block", + "BlockList", + "BlockLookupList", + "ClearRange", + "ContainerCpkScopeInfo", + "ContainerItem", + "ContainerProperties", + "CorsRule", + "CpkInfo", + "CpkScopeInfo", + "DelimitedTextConfiguration", + "FilterBlobItem", + "FilterBlobSegment", + "GeoReplication", + "JsonTextConfiguration", + "KeyInfo", + "LeaseAccessConditions", + "ListBlobsFlatSegmentResponse", + "ListBlobsHierarchySegmentResponse", + "ListContainersSegmentResponse", + "Logging", + "Metrics", + "ModifiedAccessConditions", + "PageList", + "PageRange", + "QueryFormat", + "QueryRequest", + "QuerySerialization", + "RetentionPolicy", + "SequenceNumberAccessConditions", + "SignedIdentifier", + "SourceModifiedAccessConditions", + "StaticWebsite", + "StorageError", + "StorageServiceProperties", + "StorageServiceStats", + "UserDelegationKey", + "AccessTier", + "AccessTierOptional", + "AccessTierRequired", + "AccountKind", + "ArchiveStatus", + "BlobCopySourceTags", + "BlobExpiryOptions", + "BlobImmutabilityPolicyMode", + "BlobType", + "BlockListType", + "CopyStatusType", + "DeleteSnapshotsOptionType", + "EncryptionAlgorithmType", + "FilterBlobsIncludeItem", + "GeoReplicationStatusType", + "LeaseDurationType", + "LeaseStateType", + "LeaseStatusType", + "ListBlobsIncludeItem", + "ListContainersIncludeType", + "PremiumPageBlobAccessTier", + "PublicAccessType", + "QueryFormatType", + "RehydratePriority", + "SequenceNumberActionType", + "SkuName", + "StorageErrorCode", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_azure_blob_storage_enums.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_azure_blob_storage_enums.py new file mode 100644 index 00000000000..c8aaca620fb --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_azure_blob_storage_enums.py @@ -0,0 +1,390 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
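+#
+# Illustrative note (not part of the generated code): every enum below is a str
+# Enum built on azure.core's CaseInsensitiveEnumMeta, so member lookup by name
+# is case-insensitive and members compare equal to their wire values, e.g.:
+#
+#   BlobType["block_blob"] is BlobType.BLOCK_BLOB  # case-insensitive name lookup
+#   BlobType.BLOCK_BLOB == "BlockBlob"             # str comparison with the wire value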
+# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class AccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessTier.""" + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + PREMIUM = "Premium" + COLD = "Cold" + + +class AccessTierOptional(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessTierOptional.""" + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + COLD = "Cold" + + +class AccessTierRequired(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccessTierRequired.""" + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + HOT = "Hot" + COOL = "Cool" + ARCHIVE = "Archive" + COLD = "Cold" + + +class AccountKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """AccountKind.""" + + STORAGE = "Storage" + BLOB_STORAGE = "BlobStorage" + STORAGE_V2 = "StorageV2" + FILE_STORAGE = "FileStorage" + BLOCK_BLOB_STORAGE = "BlockBlobStorage" + + +class ArchiveStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ArchiveStatus.""" + + REHYDRATE_PENDING_TO_HOT = "rehydrate-pending-to-hot" + REHYDRATE_PENDING_TO_COOL = "rehydrate-pending-to-cool" + + +class BlobCopySourceTags(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BlobCopySourceTags.""" + + REPLACE = "REPLACE" + COPY = "COPY" + + +class BlobExpiryOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BlobExpiryOptions.""" + + NEVER_EXPIRE = "NeverExpire" + RELATIVE_TO_CREATION = "RelativeToCreation" + RELATIVE_TO_NOW = "RelativeToNow" + ABSOLUTE = "Absolute" + + +class BlobImmutabilityPolicyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BlobImmutabilityPolicyMode.""" + + MUTABLE = "Mutable" + UNLOCKED = "Unlocked" + LOCKED = "Locked" + + +class BlobType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BlobType.""" + + BLOCK_BLOB = "BlockBlob" + PAGE_BLOB = "PageBlob" + APPEND_BLOB = "AppendBlob" + + +class BlockListType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """BlockListType.""" + + COMMITTED = "committed" + UNCOMMITTED = "uncommitted" + ALL = "all" + + +class CopyStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """CopyStatusType.""" + + PENDING = "pending" + SUCCESS = "success" + ABORTED = "aborted" + FAILED = "failed" + + +class DeleteSnapshotsOptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DeleteSnapshotsOptionType.""" + + INCLUDE = "include" + ONLY = "only" + + +class EncryptionAlgorithmType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """EncryptionAlgorithmType.""" + + NONE = "None" + AES256 = "AES256" + + +class FilterBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """FilterBlobsIncludeItem.""" + + NONE = "none" + VERSIONS = "versions" + + +class GeoReplicationStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The status of the secondary location.""" + + LIVE = "live" + BOOTSTRAP = "bootstrap" + UNAVAILABLE = "unavailable" + + +class LeaseDurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """LeaseDurationType.""" + + INFINITE = "infinite" + FIXED = "fixed" + + +class LeaseStateType(str, Enum, 
metaclass=CaseInsensitiveEnumMeta): + """LeaseStateType.""" + + AVAILABLE = "available" + LEASED = "leased" + EXPIRED = "expired" + BREAKING = "breaking" + BROKEN = "broken" + + +class LeaseStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """LeaseStatusType.""" + + LOCKED = "locked" + UNLOCKED = "unlocked" + + +class ListBlobsIncludeItem(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ListBlobsIncludeItem.""" + + COPY = "copy" + DELETED = "deleted" + METADATA = "metadata" + SNAPSHOTS = "snapshots" + UNCOMMITTEDBLOBS = "uncommittedblobs" + VERSIONS = "versions" + TAGS = "tags" + IMMUTABILITYPOLICY = "immutabilitypolicy" + LEGALHOLD = "legalhold" + DELETEDWITHVERSIONS = "deletedwithversions" + + +class ListContainersIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ListContainersIncludeType.""" + + METADATA = "metadata" + DELETED = "deleted" + SYSTEM = "system" + + +class PremiumPageBlobAccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """PremiumPageBlobAccessTier.""" + + P4 = "P4" + P6 = "P6" + P10 = "P10" + P15 = "P15" + P20 = "P20" + P30 = "P30" + P40 = "P40" + P50 = "P50" + P60 = "P60" + P70 = "P70" + P80 = "P80" + + +class PublicAccessType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """PublicAccessType.""" + + CONTAINER = "container" + BLOB = "blob" + + +class QueryFormatType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The quick query format type.""" + + DELIMITED = "delimited" + JSON = "json" + ARROW = "arrow" + PARQUET = "parquet" + + +class RehydratePriority(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """If an object is in rehydrate pending state then this header is returned with priority of + rehydrate. Valid values are High and Standard. + """ + + HIGH = "High" + STANDARD = "Standard" + + +class SequenceNumberActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """SequenceNumberActionType.""" + + MAX = "max" + UPDATE = "update" + INCREMENT = "increment" + + +class SkuName(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """SkuName.""" + + STANDARD_LRS = "Standard_LRS" + STANDARD_GRS = "Standard_GRS" + STANDARD_RAGRS = "Standard_RAGRS" + STANDARD_ZRS = "Standard_ZRS" + PREMIUM_LRS = "Premium_LRS" + + +class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Error codes returned by the service.""" + + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + 
MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" + APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" + BLOB_ALREADY_EXISTS = "BlobAlreadyExists" + BLOB_IMMUTABLE_DUE_TO_POLICY = "BlobImmutableDueToPolicy" + BLOB_NOT_FOUND = "BlobNotFound" + BLOB_OVERWRITTEN = "BlobOverwritten" + BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" + BLOB_USES_CUSTOMER_SPECIFIED_ENCRYPTION = "BlobUsesCustomerSpecifiedEncryption" + BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" + BLOCK_LIST_TOO_LONG = "BlockListTooLong" + CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" + CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" + CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" + CONTAINER_BEING_DELETED = "ContainerBeingDeleted" + CONTAINER_DISABLED = "ContainerDisabled" + CONTAINER_NOT_FOUND = "ContainerNotFound" + CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" + COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" + COPY_ID_MISMATCH = "CopyIdMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" + INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" + INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" + INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" + INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" + INVALID_BLOB_TIER = "InvalidBlobTier" + INVALID_BLOB_TYPE = "InvalidBlobType" + INVALID_BLOCK_ID = "InvalidBlockId" + INVALID_BLOCK_LIST = "InvalidBlockList" + INVALID_OPERATION = "InvalidOperation" + INVALID_PAGE_RANGE = "InvalidPageRange" + INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" + INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" + INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" + LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" + LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" + LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" + LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" + LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" + LEASE_ID_MISSING = "LeaseIdMissing" + LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" + LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" + LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" + LEASE_LOST = "LeaseLost" + LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" + LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" + LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = 
"LeaseNotPresentWithLeaseOperation" + MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" + NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" + NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" + OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" + PENDING_COPY_OPERATION = "PendingCopyOperation" + PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" + PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" + PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" + SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" + SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" + SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" + SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" + SNAPSHOTS_PRESENT = "SnapshotsPresent" + SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" + SYSTEM_IN_USE = "SystemInUse" + TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" + UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" + BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" + BLOB_ARCHIVED = "BlobArchived" + BLOB_NOT_ARCHIVED = "BlobNotArchived" + AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" + AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" + AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" + AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" + AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_models_py3.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_models_py3.py new file mode 100644 index 00000000000..21f534a9a52 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_models_py3.py @@ -0,0 +1,2758 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +import sys +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from .. import _serialization + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore # pylint: disable=ungrouped-imports +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object + + +class AccessPolicy(_serialization.Model): + """An Access policy. + + :ivar start: the date-time the policy is active. + :vartype start: str + :ivar expiry: the date-time the policy expires. + :vartype expiry: str + :ivar permission: the permissions for the acl policy. 
+ :vartype permission: str + """ + + _attribute_map = { + "start": {"key": "Start", "type": "str"}, + "expiry": {"key": "Expiry", "type": "str"}, + "permission": {"key": "Permission", "type": "str"}, + } + + def __init__( + self, *, start: Optional[str] = None, expiry: Optional[str] = None, permission: Optional[str] = None, **kwargs + ): + """ + :keyword start: the date-time the policy is active. + :paramtype start: str + :keyword expiry: the date-time the policy expires. + :paramtype expiry: str + :keyword permission: the permissions for the acl policy. + :paramtype permission: str + """ + super().__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class AppendPositionAccessConditions(_serialization.Model): + """Parameter group. + + :ivar max_size: Optional conditional header. The max length in bytes permitted for the append + blob. If the Append Block operation would cause the blob to exceed that limit or if the blob + size is already greater than the value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :vartype max_size: int + :ivar append_position: Optional conditional header, used only for the Append Block operation. A + number indicating the byte offset to compare. Append Block will succeed only if the append + position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + :vartype append_position: int + """ + + _attribute_map = { + "max_size": {"key": "maxSize", "type": "int"}, + "append_position": {"key": "appendPosition", "type": "int"}, + } + + def __init__(self, *, max_size: Optional[int] = None, append_position: Optional[int] = None, **kwargs): + """ + :keyword max_size: Optional conditional header. The max length in bytes permitted for the + append blob. If the Append Block operation would cause the blob to exceed that limit or if the + blob size is already greater than the value specified in this header, the request will fail + with MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :paramtype max_size: int + :keyword append_position: Optional conditional header, used only for the Append Block + operation. A number indicating the byte offset to compare. Append Block will succeed only if + the append position is equal to this number. If it is not, the request will fail with the + AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + :paramtype append_position: int + """ + super().__init__(**kwargs) + self.max_size = max_size + self.append_position = append_position + + +class ArrowConfiguration(_serialization.Model): + """Groups the settings used for formatting the response if the response should be Arrow formatted. + + All required parameters must be populated in order to send to Azure. + + :ivar schema: Required. + :vartype schema: list[~azure.storage.blob.models.ArrowField] + """ + + _validation = { + "schema": {"required": True}, + } + + _attribute_map = { + "schema": { + "key": "Schema", + "type": "[ArrowField]", + "xml": {"name": "Schema", "wrapped": True, "itemsName": "Field"}, + }, + } + _xml_map = {"name": "ArrowConfiguration"} + + def __init__(self, *, schema: List["_models.ArrowField"], **kwargs): + """ + :keyword schema: Required. 
+ :paramtype schema: list[~azure.storage.blob.models.ArrowField] + """ + super().__init__(**kwargs) + self.schema = schema + + +class ArrowField(_serialization.Model): + """Groups settings regarding specific field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. + :vartype type: str + :ivar name: + :vartype name: str + :ivar precision: + :vartype precision: int + :ivar scale: + :vartype scale: int + """ + + _validation = { + "type": {"required": True}, + } + + _attribute_map = { + "type": {"key": "Type", "type": "str"}, + "name": {"key": "Name", "type": "str"}, + "precision": {"key": "Precision", "type": "int"}, + "scale": {"key": "Scale", "type": "int"}, + } + _xml_map = {"name": "Field"} + + def __init__( + self, + *, + type: str, + name: Optional[str] = None, + precision: Optional[int] = None, + scale: Optional[int] = None, + **kwargs + ): + """ + :keyword type: Required. + :paramtype type: str + :keyword name: + :paramtype name: str + :keyword precision: + :paramtype precision: int + :keyword scale: + :paramtype scale: int + """ + super().__init__(**kwargs) + self.type = type + self.name = name + self.precision = precision + self.scale = scale + + +class BlobFlatListSegment(_serialization.Model): + """BlobFlatListSegment. + + All required parameters must be populated in order to send to Azure. + + :ivar blob_items: Required. + :vartype blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + "blob_items": {"required": True}, + } + + _attribute_map = { + "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"itemsName": "Blob"}}, + } + _xml_map = {"name": "Blobs"} + + def __init__(self, *, blob_items: List["_models.BlobItemInternal"], **kwargs): + """ + :keyword blob_items: Required. + :paramtype blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + super().__init__(**kwargs) + self.blob_items = blob_items + + +class BlobHierarchyListSegment(_serialization.Model): + """BlobHierarchyListSegment. + + All required parameters must be populated in order to send to Azure. + + :ivar blob_prefixes: + :vartype blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :ivar blob_items: Required. + :vartype blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + + _validation = { + "blob_items": {"required": True}, + } + + _attribute_map = { + "blob_prefixes": {"key": "BlobPrefixes", "type": "[BlobPrefix]", "xml": {"name": "BlobPrefix"}}, + "blob_items": {"key": "BlobItems", "type": "[BlobItemInternal]", "xml": {"name": "Blob", "itemsName": "Blob"}}, + } + _xml_map = {"name": "Blobs"} + + def __init__( + self, + *, + blob_items: List["_models.BlobItemInternal"], + blob_prefixes: Optional[List["_models.BlobPrefix"]] = None, + **kwargs + ): + """ + :keyword blob_prefixes: + :paramtype blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] + :keyword blob_items: Required. + :paramtype blob_items: list[~azure.storage.blob.models.BlobItemInternal] + """ + super().__init__(**kwargs) + self.blob_prefixes = blob_prefixes + self.blob_items = blob_items + + +class BlobHTTPHeaders(_serialization.Model): + """Parameter group. + + :ivar blob_cache_control: Optional. Sets the blob's cache control. If specified, this property + is stored with the blob and returned with a read request. + :vartype blob_cache_control: str + :ivar blob_content_type: Optional. Sets the blob's content type. 
If specified, this property is + stored with the blob and returned with a read request. + :vartype blob_content_type: str + :ivar blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is not + validated, as the hashes for the individual blocks were validated when each was uploaded. + :vartype blob_content_md5: bytes + :ivar blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :vartype blob_content_encoding: str + :ivar blob_content_language: Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + :vartype blob_content_language: str + :ivar blob_content_disposition: Optional. Sets the blob's Content-Disposition header. + :vartype blob_content_disposition: str + """ + + _attribute_map = { + "blob_cache_control": {"key": "blobCacheControl", "type": "str"}, + "blob_content_type": {"key": "blobContentType", "type": "str"}, + "blob_content_md5": {"key": "blobContentMD5", "type": "bytearray"}, + "blob_content_encoding": {"key": "blobContentEncoding", "type": "str"}, + "blob_content_language": {"key": "blobContentLanguage", "type": "str"}, + "blob_content_disposition": {"key": "blobContentDisposition", "type": "str"}, + } + + def __init__( + self, + *, + blob_cache_control: Optional[str] = None, + blob_content_type: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + **kwargs + ): + """ + :keyword blob_cache_control: Optional. Sets the blob's cache control. If specified, this + property is stored with the blob and returned with a read request. + :paramtype blob_cache_control: str + :keyword blob_content_type: Optional. Sets the blob's content type. If specified, this property + is stored with the blob and returned with a read request. + :paramtype blob_content_type: str + :keyword blob_content_md5: Optional. An MD5 hash of the blob content. Note that this hash is + not validated, as the hashes for the individual blocks were validated when each was uploaded. + :paramtype blob_content_md5: bytes + :keyword blob_content_encoding: Optional. Sets the blob's content encoding. If specified, this + property is stored with the blob and returned with a read request. + :paramtype blob_content_encoding: str + :keyword blob_content_language: Optional. Set the blob's content language. If specified, this + property is stored with the blob and returned with a read request. + :paramtype blob_content_language: str + :keyword blob_content_disposition: Optional. Sets the blob's Content-Disposition header. + :paramtype blob_content_disposition: str + """ + super().__init__(**kwargs) + self.blob_cache_control = blob_cache_control + self.blob_content_type = blob_content_type + self.blob_content_md5 = blob_content_md5 + self.blob_content_encoding = blob_content_encoding + self.blob_content_language = blob_content_language + self.blob_content_disposition = blob_content_disposition + + +class BlobItemInternal(_serialization.Model): + """An Azure Storage blob. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: ~azure.storage.blob.models.BlobName + :ivar deleted: Required. + :vartype deleted: bool + :ivar snapshot: Required. 
+ :vartype snapshot: str + :ivar version_id: + :vartype version_id: str + :ivar is_current_version: + :vartype is_current_version: bool + :ivar properties: Properties of a blob. Required. + :vartype properties: ~azure.storage.blob.models.BlobPropertiesInternal + :ivar metadata: + :vartype metadata: ~azure.storage.blob.models.BlobMetadata + :ivar blob_tags: Blob tags. + :vartype blob_tags: ~azure.storage.blob.models.BlobTags + :ivar has_versions_only: + :vartype has_versions_only: bool + :ivar object_replication_metadata: Dictionary of :code:``. + :vartype object_replication_metadata: dict[str, str] + """ + + _validation = { + "name": {"required": True}, + "deleted": {"required": True}, + "snapshot": {"required": True}, + "properties": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "BlobName"}, + "deleted": {"key": "Deleted", "type": "bool"}, + "snapshot": {"key": "Snapshot", "type": "str"}, + "version_id": {"key": "VersionId", "type": "str"}, + "is_current_version": {"key": "IsCurrentVersion", "type": "bool"}, + "properties": {"key": "Properties", "type": "BlobPropertiesInternal"}, + "metadata": {"key": "Metadata", "type": "BlobMetadata"}, + "blob_tags": {"key": "BlobTags", "type": "BlobTags"}, + "has_versions_only": {"key": "HasVersionsOnly", "type": "bool"}, + "object_replication_metadata": {"key": "OrMetadata", "type": "{str}"}, + } + _xml_map = {"name": "Blob"} + + def __init__( + self, + *, + name: "_models.BlobName", + deleted: bool, + snapshot: str, + properties: "_models.BlobPropertiesInternal", + version_id: Optional[str] = None, + is_current_version: Optional[bool] = None, + metadata: Optional["_models.BlobMetadata"] = None, + blob_tags: Optional["_models.BlobTags"] = None, + has_versions_only: Optional[bool] = None, + object_replication_metadata: Optional[Dict[str, str]] = None, + **kwargs + ): + """ + :keyword name: Required. + :paramtype name: ~azure.storage.blob.models.BlobName + :keyword deleted: Required. + :paramtype deleted: bool + :keyword snapshot: Required. + :paramtype snapshot: str + :keyword version_id: + :paramtype version_id: str + :keyword is_current_version: + :paramtype is_current_version: bool + :keyword properties: Properties of a blob. Required. + :paramtype properties: ~azure.storage.blob.models.BlobPropertiesInternal + :keyword metadata: + :paramtype metadata: ~azure.storage.blob.models.BlobMetadata + :keyword blob_tags: Blob tags. + :paramtype blob_tags: ~azure.storage.blob.models.BlobTags + :keyword has_versions_only: + :paramtype has_versions_only: bool + :keyword object_replication_metadata: Dictionary of :code:``. + :paramtype object_replication_metadata: dict[str, str] + """ + super().__init__(**kwargs) + self.name = name + self.deleted = deleted + self.snapshot = snapshot + self.version_id = version_id + self.is_current_version = is_current_version + self.properties = properties + self.metadata = metadata + self.blob_tags = blob_tags + self.has_versions_only = has_versions_only + self.object_replication_metadata = object_replication_metadata + + +class BlobMetadata(_serialization.Model): + """BlobMetadata. + + :ivar additional_properties: Unmatched properties from the message are deserialized to this + collection. 
+ :vartype additional_properties: dict[str, str] + :ivar encrypted: + :vartype encrypted: str + """ + + _attribute_map = { + "additional_properties": {"key": "", "type": "{str}"}, + "encrypted": {"key": "Encrypted", "type": "str", "xml": {"attr": True}}, + } + _xml_map = {"name": "Metadata"} + + def __init__( + self, *, additional_properties: Optional[Dict[str, str]] = None, encrypted: Optional[str] = None, **kwargs + ): + """ + :keyword additional_properties: Unmatched properties from the message are deserialized to this + collection. + :paramtype additional_properties: dict[str, str] + :keyword encrypted: + :paramtype encrypted: str + """ + super().__init__(**kwargs) + self.additional_properties = additional_properties + self.encrypted = encrypted + + +class BlobName(_serialization.Model): + """BlobName. + + :ivar encoded: Indicates if the blob name is encoded. + :vartype encoded: bool + :ivar content: The name of the blob. + :vartype content: str + """ + + _attribute_map = { + "encoded": {"key": "Encoded", "type": "bool", "xml": {"name": "Encoded", "attr": True}}, + "content": {"key": "content", "type": "str", "xml": {"text": True}}, + } + + def __init__(self, *, encoded: Optional[bool] = None, content: Optional[str] = None, **kwargs): + """ + :keyword encoded: Indicates if the blob name is encoded. + :paramtype encoded: bool + :keyword content: The name of the blob. + :paramtype content: str + """ + super().__init__(**kwargs) + self.encoded = encoded + self.content = content + + +class BlobPrefix(_serialization.Model): + """BlobPrefix. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: ~azure.storage.blob.models.BlobName + """ + + _validation = { + "name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "BlobName"}, + } + + def __init__(self, *, name: "_models.BlobName", **kwargs): + """ + :keyword name: Required. + :paramtype name: ~azure.storage.blob.models.BlobName + """ + super().__init__(**kwargs) + self.name = name + + +class BlobPropertiesInternal(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Properties of a blob. + + All required parameters must be populated in order to send to Azure. + + :ivar creation_time: + :vartype creation_time: ~datetime.datetime + :ivar last_modified: Required. + :vartype last_modified: ~datetime.datetime + :ivar etag: Required. + :vartype etag: str + :ivar content_length: Size in bytes. + :vartype content_length: int + :ivar content_type: + :vartype content_type: str + :ivar content_encoding: + :vartype content_encoding: str + :ivar content_language: + :vartype content_language: str + :ivar content_md5: + :vartype content_md5: bytes + :ivar content_disposition: + :vartype content_disposition: str + :ivar cache_control: + :vartype cache_control: str + :ivar blob_sequence_number: + :vartype blob_sequence_number: int + :ivar blob_type: Known values are: "BlockBlob", "PageBlob", and "AppendBlob". + :vartype blob_type: str or ~azure.storage.blob.models.BlobType + :ivar lease_status: Known values are: "locked" and "unlocked". + :vartype lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :ivar lease_state: Known values are: "available", "leased", "expired", "breaking", and + "broken". + :vartype lease_state: str or ~azure.storage.blob.models.LeaseStateType + :ivar lease_duration: Known values are: "infinite" and "fixed". 
+ :vartype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :ivar copy_id: + :vartype copy_id: str + :ivar copy_status: Known values are: "pending", "success", "aborted", and "failed". + :vartype copy_status: str or ~azure.storage.blob.models.CopyStatusType + :ivar copy_source: + :vartype copy_source: str + :ivar copy_progress: + :vartype copy_progress: str + :ivar copy_completion_time: + :vartype copy_completion_time: ~datetime.datetime + :ivar copy_status_description: + :vartype copy_status_description: str + :ivar server_encrypted: + :vartype server_encrypted: bool + :ivar incremental_copy: + :vartype incremental_copy: bool + :ivar destination_snapshot: + :vartype destination_snapshot: str + :ivar deleted_time: + :vartype deleted_time: ~datetime.datetime + :ivar remaining_retention_days: + :vartype remaining_retention_days: int + :ivar access_tier: Known values are: "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", + "P60", "P70", "P80", "Hot", "Cool", "Archive", "Premium", and "Cold". + :vartype access_tier: str or ~azure.storage.blob.models.AccessTier + :ivar access_tier_inferred: + :vartype access_tier_inferred: bool + :ivar archive_status: Known values are: "rehydrate-pending-to-hot" and + "rehydrate-pending-to-cool". + :vartype archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :ivar customer_provided_key_sha256: + :vartype customer_provided_key_sha256: str + :ivar encryption_scope: The name of the encryption scope under which the blob is encrypted. + :vartype encryption_scope: str + :ivar access_tier_change_time: + :vartype access_tier_change_time: ~datetime.datetime + :ivar tag_count: + :vartype tag_count: int + :ivar expires_on: + :vartype expires_on: ~datetime.datetime + :ivar is_sealed: + :vartype is_sealed: bool + :ivar rehydrate_priority: If an object is in rehydrate pending state then this header is + returned with priority of rehydrate. Valid values are High and Standard. Known values are: + "High" and "Standard". + :vartype rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :ivar last_accessed_on: + :vartype last_accessed_on: ~datetime.datetime + :ivar immutability_policy_expires_on: + :vartype immutability_policy_expires_on: ~datetime.datetime + :ivar immutability_policy_mode: Known values are: "Mutable", "Unlocked", and "Locked". 
+ :vartype immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :ivar legal_hold: + :vartype legal_hold: bool + """ + + _validation = { + "last_modified": {"required": True}, + "etag": {"required": True}, + } + + _attribute_map = { + "creation_time": {"key": "Creation-Time", "type": "rfc-1123"}, + "last_modified": {"key": "Last-Modified", "type": "rfc-1123"}, + "etag": {"key": "Etag", "type": "str"}, + "content_length": {"key": "Content-Length", "type": "int"}, + "content_type": {"key": "Content-Type", "type": "str"}, + "content_encoding": {"key": "Content-Encoding", "type": "str"}, + "content_language": {"key": "Content-Language", "type": "str"}, + "content_md5": {"key": "Content-MD5", "type": "bytearray"}, + "content_disposition": {"key": "Content-Disposition", "type": "str"}, + "cache_control": {"key": "Cache-Control", "type": "str"}, + "blob_sequence_number": {"key": "x-ms-blob-sequence-number", "type": "int"}, + "blob_type": {"key": "BlobType", "type": "str"}, + "lease_status": {"key": "LeaseStatus", "type": "str"}, + "lease_state": {"key": "LeaseState", "type": "str"}, + "lease_duration": {"key": "LeaseDuration", "type": "str"}, + "copy_id": {"key": "CopyId", "type": "str"}, + "copy_status": {"key": "CopyStatus", "type": "str"}, + "copy_source": {"key": "CopySource", "type": "str"}, + "copy_progress": {"key": "CopyProgress", "type": "str"}, + "copy_completion_time": {"key": "CopyCompletionTime", "type": "rfc-1123"}, + "copy_status_description": {"key": "CopyStatusDescription", "type": "str"}, + "server_encrypted": {"key": "ServerEncrypted", "type": "bool"}, + "incremental_copy": {"key": "IncrementalCopy", "type": "bool"}, + "destination_snapshot": {"key": "DestinationSnapshot", "type": "str"}, + "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"}, + "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"}, + "access_tier": {"key": "AccessTier", "type": "str"}, + "access_tier_inferred": {"key": "AccessTierInferred", "type": "bool"}, + "archive_status": {"key": "ArchiveStatus", "type": "str"}, + "customer_provided_key_sha256": {"key": "CustomerProvidedKeySha256", "type": "str"}, + "encryption_scope": {"key": "EncryptionScope", "type": "str"}, + "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"}, + "tag_count": {"key": "TagCount", "type": "int"}, + "expires_on": {"key": "Expiry-Time", "type": "rfc-1123"}, + "is_sealed": {"key": "Sealed", "type": "bool"}, + "rehydrate_priority": {"key": "RehydratePriority", "type": "str"}, + "last_accessed_on": {"key": "LastAccessTime", "type": "rfc-1123"}, + "immutability_policy_expires_on": {"key": "ImmutabilityPolicyUntilDate", "type": "rfc-1123"}, + "immutability_policy_mode": {"key": "ImmutabilityPolicyMode", "type": "str"}, + "legal_hold": {"key": "LegalHold", "type": "bool"}, + } + _xml_map = {"name": "Properties"} + + def __init__( # pylint: disable=too-many-locals + self, + *, + last_modified: datetime.datetime, + etag: str, + creation_time: Optional[datetime.datetime] = None, + content_length: Optional[int] = None, + content_type: Optional[str] = None, + content_encoding: Optional[str] = None, + content_language: Optional[str] = None, + content_md5: Optional[bytes] = None, + content_disposition: Optional[str] = None, + cache_control: Optional[str] = None, + blob_sequence_number: Optional[int] = None, + blob_type: Optional[Union[str, "_models.BlobType"]] = None, + lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None, + lease_state: 
Optional[Union[str, "_models.LeaseStateType"]] = None, + lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None, + copy_id: Optional[str] = None, + copy_status: Optional[Union[str, "_models.CopyStatusType"]] = None, + copy_source: Optional[str] = None, + copy_progress: Optional[str] = None, + copy_completion_time: Optional[datetime.datetime] = None, + copy_status_description: Optional[str] = None, + server_encrypted: Optional[bool] = None, + incremental_copy: Optional[bool] = None, + destination_snapshot: Optional[str] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier: Optional[Union[str, "_models.AccessTier"]] = None, + access_tier_inferred: Optional[bool] = None, + archive_status: Optional[Union[str, "_models.ArchiveStatus"]] = None, + customer_provided_key_sha256: Optional[str] = None, + encryption_scope: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + tag_count: Optional[int] = None, + expires_on: Optional[datetime.datetime] = None, + is_sealed: Optional[bool] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + last_accessed_on: Optional[datetime.datetime] = None, + immutability_policy_expires_on: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs + ): + """ + :keyword creation_time: + :paramtype creation_time: ~datetime.datetime + :keyword last_modified: Required. + :paramtype last_modified: ~datetime.datetime + :keyword etag: Required. + :paramtype etag: str + :keyword content_length: Size in bytes. + :paramtype content_length: int + :keyword content_type: + :paramtype content_type: str + :keyword content_encoding: + :paramtype content_encoding: str + :keyword content_language: + :paramtype content_language: str + :keyword content_md5: + :paramtype content_md5: bytes + :keyword content_disposition: + :paramtype content_disposition: str + :keyword cache_control: + :paramtype cache_control: str + :keyword blob_sequence_number: + :paramtype blob_sequence_number: int + :keyword blob_type: Known values are: "BlockBlob", "PageBlob", and "AppendBlob". + :paramtype blob_type: str or ~azure.storage.blob.models.BlobType + :keyword lease_status: Known values are: "locked" and "unlocked". + :paramtype lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :keyword lease_state: Known values are: "available", "leased", "expired", "breaking", and + "broken". + :paramtype lease_state: str or ~azure.storage.blob.models.LeaseStateType + :keyword lease_duration: Known values are: "infinite" and "fixed". + :paramtype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :keyword copy_id: + :paramtype copy_id: str + :keyword copy_status: Known values are: "pending", "success", "aborted", and "failed". 
+ :paramtype copy_status: str or ~azure.storage.blob.models.CopyStatusType + :keyword copy_source: + :paramtype copy_source: str + :keyword copy_progress: + :paramtype copy_progress: str + :keyword copy_completion_time: + :paramtype copy_completion_time: ~datetime.datetime + :keyword copy_status_description: + :paramtype copy_status_description: str + :keyword server_encrypted: + :paramtype server_encrypted: bool + :keyword incremental_copy: + :paramtype incremental_copy: bool + :keyword destination_snapshot: + :paramtype destination_snapshot: str + :keyword deleted_time: + :paramtype deleted_time: ~datetime.datetime + :keyword remaining_retention_days: + :paramtype remaining_retention_days: int + :keyword access_tier: Known values are: "P4", "P6", "P10", "P15", "P20", "P30", "P40", "P50", + "P60", "P70", "P80", "Hot", "Cool", "Archive", "Premium", and "Cold". + :paramtype access_tier: str or ~azure.storage.blob.models.AccessTier + :keyword access_tier_inferred: + :paramtype access_tier_inferred: bool + :keyword archive_status: Known values are: "rehydrate-pending-to-hot" and + "rehydrate-pending-to-cool". + :paramtype archive_status: str or ~azure.storage.blob.models.ArchiveStatus + :keyword customer_provided_key_sha256: + :paramtype customer_provided_key_sha256: str + :keyword encryption_scope: The name of the encryption scope under which the blob is encrypted. + :paramtype encryption_scope: str + :keyword access_tier_change_time: + :paramtype access_tier_change_time: ~datetime.datetime + :keyword tag_count: + :paramtype tag_count: int + :keyword expires_on: + :paramtype expires_on: ~datetime.datetime + :keyword is_sealed: + :paramtype is_sealed: bool + :keyword rehydrate_priority: If an object is in rehydrate pending state then this header is + returned with priority of rehydrate. Valid values are High and Standard. Known values are: + "High" and "Standard". + :paramtype rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :keyword last_accessed_on: + :paramtype last_accessed_on: ~datetime.datetime + :keyword immutability_policy_expires_on: + :paramtype immutability_policy_expires_on: ~datetime.datetime + :keyword immutability_policy_mode: Known values are: "Mutable", "Unlocked", and "Locked". 
+ :paramtype immutability_policy_mode: str or + ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :keyword legal_hold: + :paramtype legal_hold: bool + """ + super().__init__(**kwargs) + self.creation_time = creation_time + self.last_modified = last_modified + self.etag = etag + self.content_length = content_length + self.content_type = content_type + self.content_encoding = content_encoding + self.content_language = content_language + self.content_md5 = content_md5 + self.content_disposition = content_disposition + self.cache_control = cache_control + self.blob_sequence_number = blob_sequence_number + self.blob_type = blob_type + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.copy_id = copy_id + self.copy_status = copy_status + self.copy_source = copy_source + self.copy_progress = copy_progress + self.copy_completion_time = copy_completion_time + self.copy_status_description = copy_status_description + self.server_encrypted = server_encrypted + self.incremental_copy = incremental_copy + self.destination_snapshot = destination_snapshot + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_inferred = access_tier_inferred + self.archive_status = archive_status + self.customer_provided_key_sha256 = customer_provided_key_sha256 + self.encryption_scope = encryption_scope + self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + self.rehydrate_priority = rehydrate_priority + self.last_accessed_on = last_accessed_on + self.immutability_policy_expires_on = immutability_policy_expires_on + self.immutability_policy_mode = immutability_policy_mode + self.legal_hold = legal_hold + + +class BlobTag(_serialization.Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :ivar key: Required. + :vartype key: str + :ivar value: Required. + :vartype value: str + """ + + _validation = { + "key": {"required": True}, + "value": {"required": True}, + } + + _attribute_map = { + "key": {"key": "Key", "type": "str"}, + "value": {"key": "Value", "type": "str"}, + } + _xml_map = {"name": "Tag"} + + def __init__(self, *, key: str, value: str, **kwargs): + """ + :keyword key: Required. + :paramtype key: str + :keyword value: Required. + :paramtype value: str + """ + super().__init__(**kwargs) + self.key = key + self.value = value + + +class BlobTags(_serialization.Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :ivar blob_tag_set: Required. + :vartype blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + "blob_tag_set": {"required": True}, + } + + _attribute_map = { + "blob_tag_set": { + "key": "BlobTagSet", + "type": "[BlobTag]", + "xml": {"name": "TagSet", "wrapped": True, "itemsName": "Tag"}, + }, + } + _xml_map = {"name": "Tags"} + + def __init__(self, *, blob_tag_set: List["_models.BlobTag"], **kwargs): + """ + :keyword blob_tag_set: Required. + :paramtype blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + super().__init__(**kwargs) + self.blob_tag_set = blob_tag_set + + +class Block(_serialization.Model): + """Represents a single block in a block blob. It describes the block's ID and size. + + All required parameters must be populated in order to send to Azure. + + :ivar name: The base64 encoded block ID. Required. 
+ :vartype name: str + :ivar size: The block size in bytes. Required. + :vartype size: int + """ + + _validation = { + "name": {"required": True}, + "size": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "str"}, + "size": {"key": "Size", "type": "int"}, + } + + def __init__(self, *, name: str, size: int, **kwargs): + """ + :keyword name: The base64 encoded block ID. Required. + :paramtype name: str + :keyword size: The block size in bytes. Required. + :paramtype size: int + """ + super().__init__(**kwargs) + self.name = name + self.size = size + + +class BlockList(_serialization.Model): + """BlockList. + + :ivar committed_blocks: + :vartype committed_blocks: list[~azure.storage.blob.models.Block] + :ivar uncommitted_blocks: + :vartype uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + + _attribute_map = { + "committed_blocks": {"key": "CommittedBlocks", "type": "[Block]", "xml": {"wrapped": True}}, + "uncommitted_blocks": {"key": "UncommittedBlocks", "type": "[Block]", "xml": {"wrapped": True}}, + } + + def __init__( + self, + *, + committed_blocks: Optional[List["_models.Block"]] = None, + uncommitted_blocks: Optional[List["_models.Block"]] = None, + **kwargs + ): + """ + :keyword committed_blocks: + :paramtype committed_blocks: list[~azure.storage.blob.models.Block] + :keyword uncommitted_blocks: + :paramtype uncommitted_blocks: list[~azure.storage.blob.models.Block] + """ + super().__init__(**kwargs) + self.committed_blocks = committed_blocks + self.uncommitted_blocks = uncommitted_blocks + + +class BlockLookupList(_serialization.Model): + """BlockLookupList. + + :ivar committed: + :vartype committed: list[str] + :ivar uncommitted: + :vartype uncommitted: list[str] + :ivar latest: + :vartype latest: list[str] + """ + + _attribute_map = { + "committed": {"key": "Committed", "type": "[str]", "xml": {"itemsName": "Committed"}}, + "uncommitted": {"key": "Uncommitted", "type": "[str]", "xml": {"itemsName": "Uncommitted"}}, + "latest": {"key": "Latest", "type": "[str]", "xml": {"itemsName": "Latest"}}, + } + _xml_map = {"name": "BlockList"} + + def __init__( + self, + *, + committed: Optional[List[str]] = None, + uncommitted: Optional[List[str]] = None, + latest: Optional[List[str]] = None, + **kwargs + ): + """ + :keyword committed: + :paramtype committed: list[str] + :keyword uncommitted: + :paramtype uncommitted: list[str] + :keyword latest: + :paramtype latest: list[str] + """ + super().__init__(**kwargs) + self.committed = committed + self.uncommitted = uncommitted + self.latest = latest + + +class ClearRange(_serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :ivar start: Required. + :vartype start: int + :ivar end: Required. + :vartype end: int + """ + + _validation = { + "start": {"required": True}, + "end": {"required": True}, + } + + _attribute_map = { + "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}}, + "end": {"key": "End", "type": "int", "xml": {"name": "End"}}, + } + _xml_map = {"name": "ClearRange"} + + def __init__(self, *, start: int, end: int, **kwargs): + """ + :keyword start: Required. + :paramtype start: int + :keyword end: Required. + :paramtype end: int + """ + super().__init__(**kwargs) + self.start = start + self.end = end + + +class ContainerCpkScopeInfo(_serialization.Model): + """Parameter group. + + :ivar default_encryption_scope: Optional. Version 2019-07-07 and later. 
Specifies the default
+     encryption scope to set on the container and use for all future writes.
+    :vartype default_encryption_scope: str
+    :ivar prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true,
+     prevents any request from specifying a different encryption scope than the scope set on the
+     container.
+    :vartype prevent_encryption_scope_override: bool
+    """
+
+    _attribute_map = {
+        "default_encryption_scope": {"key": "DefaultEncryptionScope", "type": "str"},
+        "prevent_encryption_scope_override": {"key": "PreventEncryptionScopeOverride", "type": "bool"},
+    }
+
+    def __init__(
+        self,
+        *,
+        default_encryption_scope: Optional[str] = None,
+        prevent_encryption_scope_override: Optional[bool] = None,
+        **kwargs
+    ):
+        """
+        :keyword default_encryption_scope: Optional. Version 2019-07-07 and later. Specifies the
+         default encryption scope to set on the container and use for all future writes.
+        :paramtype default_encryption_scope: str
+        :keyword prevent_encryption_scope_override: Optional. Version 2019-07-07 and newer. If true,
+         prevents any request from specifying a different encryption scope than the scope set on the
+         container.
+        :paramtype prevent_encryption_scope_override: bool
+        """
+        super().__init__(**kwargs)
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+
+
+class ContainerItem(_serialization.Model):
+    """An Azure Storage container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar name: Required.
+    :vartype name: str
+    :ivar deleted:
+    :vartype deleted: bool
+    :ivar version:
+    :vartype version: str
+    :ivar properties: Properties of a container. Required.
+    :vartype properties: ~azure.storage.blob.models.ContainerProperties
+    :ivar metadata: Dictionary of :code:`<string>`.
+    :vartype metadata: dict[str, str]
+    """
+
+    _validation = {
+        "name": {"required": True},
+        "properties": {"required": True},
+    }
+
+    _attribute_map = {
+        "name": {"key": "Name", "type": "str"},
+        "deleted": {"key": "Deleted", "type": "bool"},
+        "version": {"key": "Version", "type": "str"},
+        "properties": {"key": "Properties", "type": "ContainerProperties"},
+        "metadata": {"key": "Metadata", "type": "{str}"},
+    }
+    _xml_map = {"name": "Container"}
+
+    def __init__(
+        self,
+        *,
+        name: str,
+        properties: "_models.ContainerProperties",
+        deleted: Optional[bool] = None,
+        version: Optional[str] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ):
+        """
+        :keyword name: Required.
+        :paramtype name: str
+        :keyword deleted:
+        :paramtype deleted: bool
+        :keyword version:
+        :paramtype version: str
+        :keyword properties: Properties of a container. Required.
+        :paramtype properties: ~azure.storage.blob.models.ContainerProperties
+        :keyword metadata: Dictionary of :code:`<string>`.
+        :paramtype metadata: dict[str, str]
+        """
+        super().__init__(**kwargs)
+        self.name = name
+        self.deleted = deleted
+        self.version = version
+        self.properties = properties
+        self.metadata = metadata
+
+
+class ContainerProperties(_serialization.Model):  # pylint: disable=too-many-instance-attributes
+    """Properties of a container.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar last_modified: Required.
+    :vartype last_modified: ~datetime.datetime
+    :ivar etag: Required.
+    :vartype etag: str
+    :ivar lease_status: Known values are: "locked" and "unlocked".
+ :vartype lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :ivar lease_state: Known values are: "available", "leased", "expired", "breaking", and + "broken". + :vartype lease_state: str or ~azure.storage.blob.models.LeaseStateType + :ivar lease_duration: Known values are: "infinite" and "fixed". + :vartype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType + :ivar public_access: Known values are: "container" and "blob". + :vartype public_access: str or ~azure.storage.blob.models.PublicAccessType + :ivar has_immutability_policy: + :vartype has_immutability_policy: bool + :ivar has_legal_hold: + :vartype has_legal_hold: bool + :ivar default_encryption_scope: + :vartype default_encryption_scope: str + :ivar prevent_encryption_scope_override: + :vartype prevent_encryption_scope_override: bool + :ivar deleted_time: + :vartype deleted_time: ~datetime.datetime + :ivar remaining_retention_days: + :vartype remaining_retention_days: int + :ivar is_immutable_storage_with_versioning_enabled: Indicates if version level worm is enabled + on this container. + :vartype is_immutable_storage_with_versioning_enabled: bool + """ + + _validation = { + "last_modified": {"required": True}, + "etag": {"required": True}, + } + + _attribute_map = { + "last_modified": {"key": "Last-Modified", "type": "rfc-1123"}, + "etag": {"key": "Etag", "type": "str"}, + "lease_status": {"key": "LeaseStatus", "type": "str"}, + "lease_state": {"key": "LeaseState", "type": "str"}, + "lease_duration": {"key": "LeaseDuration", "type": "str"}, + "public_access": {"key": "PublicAccess", "type": "str"}, + "has_immutability_policy": {"key": "HasImmutabilityPolicy", "type": "bool"}, + "has_legal_hold": {"key": "HasLegalHold", "type": "bool"}, + "default_encryption_scope": {"key": "DefaultEncryptionScope", "type": "str"}, + "prevent_encryption_scope_override": {"key": "DenyEncryptionScopeOverride", "type": "bool"}, + "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"}, + "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"}, + "is_immutable_storage_with_versioning_enabled": { + "key": "ImmutableStorageWithVersioningEnabled", + "type": "bool", + }, + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "_models.LeaseStateType"]] = None, + lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None, + public_access: Optional[Union[str, "_models.PublicAccessType"]] = None, + has_immutability_policy: Optional[bool] = None, + has_legal_hold: Optional[bool] = None, + default_encryption_scope: Optional[str] = None, + prevent_encryption_scope_override: Optional[bool] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + is_immutable_storage_with_versioning_enabled: Optional[bool] = None, + **kwargs + ): + """ + :keyword last_modified: Required. + :paramtype last_modified: ~datetime.datetime + :keyword etag: Required. + :paramtype etag: str + :keyword lease_status: Known values are: "locked" and "unlocked". + :paramtype lease_status: str or ~azure.storage.blob.models.LeaseStatusType + :keyword lease_state: Known values are: "available", "leased", "expired", "breaking", and + "broken". + :paramtype lease_state: str or ~azure.storage.blob.models.LeaseStateType + :keyword lease_duration: Known values are: "infinite" and "fixed". 
+        :paramtype lease_duration: str or ~azure.storage.blob.models.LeaseDurationType
+        :keyword public_access: Known values are: "container" and "blob".
+        :paramtype public_access: str or ~azure.storage.blob.models.PublicAccessType
+        :keyword has_immutability_policy:
+        :paramtype has_immutability_policy: bool
+        :keyword has_legal_hold:
+        :paramtype has_legal_hold: bool
+        :keyword default_encryption_scope:
+        :paramtype default_encryption_scope: str
+        :keyword prevent_encryption_scope_override:
+        :paramtype prevent_encryption_scope_override: bool
+        :keyword deleted_time:
+        :paramtype deleted_time: ~datetime.datetime
+        :keyword remaining_retention_days:
+        :paramtype remaining_retention_days: int
+        :keyword is_immutable_storage_with_versioning_enabled: Indicates if version level worm is
+         enabled on this container.
+        :paramtype is_immutable_storage_with_versioning_enabled: bool
+        """
+        super().__init__(**kwargs)
+        self.last_modified = last_modified
+        self.etag = etag
+        self.lease_status = lease_status
+        self.lease_state = lease_state
+        self.lease_duration = lease_duration
+        self.public_access = public_access
+        self.has_immutability_policy = has_immutability_policy
+        self.has_legal_hold = has_legal_hold
+        self.default_encryption_scope = default_encryption_scope
+        self.prevent_encryption_scope_override = prevent_encryption_scope_override
+        self.deleted_time = deleted_time
+        self.remaining_retention_days = remaining_retention_days
+        self.is_immutable_storage_with_versioning_enabled = is_immutable_storage_with_versioning_enabled
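# A minimal usage sketch of the two container models above (hypothetical values, for
# illustration only): both are plain keyword-only constructors, so a listing entry is
# assembled from its parts.
_example_container_item = ContainerItem(
    name="my-container",  # hypothetical container name
    properties=ContainerProperties(
        last_modified=datetime.datetime(2023, 6, 1, tzinfo=datetime.timezone.utc),
        etag='"0x8DC0000000000"',  # hypothetical ETag value
        lease_status="unlocked",
        public_access="blob",
    ),
    metadata={"env": "test"},
)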
+
+
+class CorsRule(_serialization.Model):
+    """CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar allowed_origins: The origin domains that are permitted to make a request against the
+     storage service via CORS. The origin domain is the domain from which the request originates.
+     Note that the origin must be an exact case-sensitive match with the origin that the user agent
+     sends to the service. You can also use the wildcard character '*' to allow all origin domains
+     to make requests via CORS. Required.
+    :vartype allowed_origins: str
+    :ivar allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a
+     CORS request (comma separated). Required.
+    :vartype allowed_methods: str
+    :ivar allowed_headers: The request headers that the origin domain may specify on the CORS
+     request. Required.
+    :vartype allowed_headers: str
+    :ivar exposed_headers: The response headers that may be sent in the response to the CORS
+     request and exposed by the browser to the request issuer. Required.
+    :vartype exposed_headers: str
+    :ivar max_age_in_seconds: The maximum amount of time that a browser should cache the preflight
+     OPTIONS request. Required.
+    :vartype max_age_in_seconds: int
+    """
+
+    _validation = {
+        "allowed_origins": {"required": True},
+        "allowed_methods": {"required": True},
+        "allowed_headers": {"required": True},
+        "exposed_headers": {"required": True},
+        "max_age_in_seconds": {"required": True, "minimum": 0},
+    }
+
+    _attribute_map = {
+        "allowed_origins": {"key": "AllowedOrigins", "type": "str"},
+        "allowed_methods": {"key": "AllowedMethods", "type": "str"},
+        "allowed_headers": {"key": "AllowedHeaders", "type": "str"},
+        "exposed_headers": {"key": "ExposedHeaders", "type": "str"},
+        "max_age_in_seconds": {"key": "MaxAgeInSeconds", "type": "int"},
+    }
+
+    def __init__(
+        self,
+        *,
+        allowed_origins: str,
+        allowed_methods: str,
+        allowed_headers: str,
+        exposed_headers: str,
+        max_age_in_seconds: int,
+        **kwargs
+    ):
+        """
+        :keyword allowed_origins: The origin domains that are permitted to make a request against the
+         storage service via CORS. The origin domain is the domain from which the request originates.
+         Note that the origin must be an exact case-sensitive match with the origin that the user
+         agent sends to the service. You can also use the wildcard character '*' to allow all origin
+         domains to make requests via CORS. Required.
+        :paramtype allowed_origins: str
+        :keyword allowed_methods: The methods (HTTP request verbs) that the origin domain may use for
+         a CORS request (comma separated). Required.
+        :paramtype allowed_methods: str
+        :keyword allowed_headers: The request headers that the origin domain may specify on the CORS
+         request. Required.
+        :paramtype allowed_headers: str
+        :keyword exposed_headers: The response headers that may be sent in the response to the CORS
+         request and exposed by the browser to the request issuer. Required.
+        :paramtype exposed_headers: str
+        :keyword max_age_in_seconds: The maximum amount of time that a browser should cache the
+         preflight OPTIONS request. Required.
+        :paramtype max_age_in_seconds: int
+        """
+        super().__init__(**kwargs)
+        self.allowed_origins = allowed_origins
+        self.allowed_methods = allowed_methods
+        self.allowed_headers = allowed_headers
+        self.exposed_headers = exposed_headers
+        self.max_age_in_seconds = max_age_in_seconds
+
+
+class CpkInfo(_serialization.Model):
+    """Parameter group.
+
+    :ivar encryption_key: Optional. Specifies the encryption key to use to encrypt the data
+     provided in the request. If not specified, encryption is performed with the root account
+     encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+    :vartype encryption_key: str
+    :ivar encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be provided
+     if the x-ms-encryption-key header is provided.
+    :vartype encryption_key_sha256: str
+    :ivar encryption_algorithm: The algorithm used to produce the encryption key hash. Currently,
+     the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is
+     provided. Known values are: "None" and "AES256".
+ :vartype encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType + """ + + _attribute_map = { + "encryption_key": {"key": "encryptionKey", "type": "str"}, + "encryption_key_sha256": {"key": "encryptionKeySha256", "type": "str"}, + "encryption_algorithm": {"key": "encryptionAlgorithm", "type": "str"}, + } + + def __init__( + self, + *, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + **kwargs + ): + """ + :keyword encryption_key: Optional. Specifies the encryption key to use to encrypt the data + provided in the request. If not specified, encryption is performed with the root account + encryption key. For more information, see Encryption at Rest for Azure Storage Services. + :paramtype encryption_key: str + :keyword encryption_key_sha256: The SHA-256 hash of the provided encryption key. Must be + provided if the x-ms-encryption-key header is provided. + :paramtype encryption_key_sha256: str + :keyword encryption_algorithm: The algorithm used to produce the encryption key hash. + Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key + header is provided. Known values are: "None" and "AES256". + :paramtype encryption_algorithm: str or ~azure.storage.blob.models.EncryptionAlgorithmType + """ + super().__init__(**kwargs) + self.encryption_key = encryption_key + self.encryption_key_sha256 = encryption_key_sha256 + self.encryption_algorithm = encryption_algorithm + + +class CpkScopeInfo(_serialization.Model): + """Parameter group. + + :ivar encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the + encryption scope to use to encrypt the data provided in the request. If not specified, + encryption is performed with the default account encryption scope. For more information, see + Encryption at Rest for Azure Storage Services. + :vartype encryption_scope: str + """ + + _attribute_map = { + "encryption_scope": {"key": "encryptionScope", "type": "str"}, + } + + def __init__(self, *, encryption_scope: Optional[str] = None, **kwargs): + """ + :keyword encryption_scope: Optional. Version 2019-07-07 and later. Specifies the name of the + encryption scope to use to encrypt the data provided in the request. If not specified, + encryption is performed with the default account encryption scope. For more information, see + Encryption at Rest for Azure Storage Services. + :paramtype encryption_scope: str + """ + super().__init__(**kwargs) + self.encryption_scope = encryption_scope + + +class DelimitedTextConfiguration(_serialization.Model): + """Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + + :ivar column_separator: The string used to separate columns. + :vartype column_separator: str + :ivar field_quote: The string used to quote a specific field. + :vartype field_quote: str + :ivar record_separator: The string used to separate records. + :vartype record_separator: str + :ivar escape_char: The string used as an escape character. + :vartype escape_char: str + :ivar headers_present: Represents whether the data has headers. 
+ :vartype headers_present: bool + """ + + _attribute_map = { + "column_separator": {"key": "ColumnSeparator", "type": "str", "xml": {"name": "ColumnSeparator"}}, + "field_quote": {"key": "FieldQuote", "type": "str", "xml": {"name": "FieldQuote"}}, + "record_separator": {"key": "RecordSeparator", "type": "str", "xml": {"name": "RecordSeparator"}}, + "escape_char": {"key": "EscapeChar", "type": "str", "xml": {"name": "EscapeChar"}}, + "headers_present": {"key": "HeadersPresent", "type": "bool", "xml": {"name": "HasHeaders"}}, + } + _xml_map = {"name": "DelimitedTextConfiguration"} + + def __init__( + self, + *, + column_separator: Optional[str] = None, + field_quote: Optional[str] = None, + record_separator: Optional[str] = None, + escape_char: Optional[str] = None, + headers_present: Optional[bool] = None, + **kwargs + ): + """ + :keyword column_separator: The string used to separate columns. + :paramtype column_separator: str + :keyword field_quote: The string used to quote a specific field. + :paramtype field_quote: str + :keyword record_separator: The string used to separate records. + :paramtype record_separator: str + :keyword escape_char: The string used as an escape character. + :paramtype escape_char: str + :keyword headers_present: Represents whether the data has headers. + :paramtype headers_present: bool + """ + super().__init__(**kwargs) + self.column_separator = column_separator + self.field_quote = field_quote + self.record_separator = record_separator + self.escape_char = escape_char + self.headers_present = headers_present + + +class FilterBlobItem(_serialization.Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: str + :ivar container_name: Required. + :vartype container_name: str + :ivar tags: Blob tags. + :vartype tags: ~azure.storage.blob.models.BlobTags + :ivar version_id: + :vartype version_id: str + :ivar is_current_version: + :vartype is_current_version: bool + """ + + _validation = { + "name": {"required": True}, + "container_name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "str"}, + "container_name": {"key": "ContainerName", "type": "str"}, + "tags": {"key": "Tags", "type": "BlobTags"}, + "version_id": {"key": "VersionId", "type": "str"}, + "is_current_version": {"key": "IsCurrentVersion", "type": "bool"}, + } + _xml_map = {"name": "Blob"} + + def __init__( + self, + *, + name: str, + container_name: str, + tags: Optional["_models.BlobTags"] = None, + version_id: Optional[str] = None, + is_current_version: Optional[bool] = None, + **kwargs + ): + """ + :keyword name: Required. + :paramtype name: str + :keyword container_name: Required. + :paramtype container_name: str + :keyword tags: Blob tags. + :paramtype tags: ~azure.storage.blob.models.BlobTags + :keyword version_id: + :paramtype version_id: str + :keyword is_current_version: + :paramtype is_current_version: bool + """ + super().__init__(**kwargs) + self.name = name + self.container_name = container_name + self.tags = tags + self.version_id = version_id + self.is_current_version = is_current_version + + +class FilterBlobSegment(_serialization.Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar where: Required. + :vartype where: str + :ivar blobs: Required. 
+ :vartype blobs: list[~azure.storage.blob.models.FilterBlobItem] + :ivar next_marker: + :vartype next_marker: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "where": {"required": True}, + "blobs": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "where": {"key": "Where", "type": "str"}, + "blobs": { + "key": "Blobs", + "type": "[FilterBlobItem]", + "xml": {"name": "Blobs", "wrapped": True, "itemsName": "Blob"}, + }, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + where: str, + blobs: List["_models.FilterBlobItem"], + next_marker: Optional[str] = None, + **kwargs + ): + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword where: Required. + :paramtype where: str + :keyword blobs: Required. + :paramtype blobs: list[~azure.storage.blob.models.FilterBlobItem] + :keyword next_marker: + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.where = where + self.blobs = blobs + self.next_marker = next_marker + + +class GeoReplication(_serialization.Model): + """Geo-Replication information for the Secondary Storage Service. + + All required parameters must be populated in order to send to Azure. + + :ivar status: The status of the secondary location. Required. Known values are: "live", + "bootstrap", and "unavailable". + :vartype status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :ivar last_sync_time: A GMT date/time value, to the second. All primary writes preceding this + value are guaranteed to be available for read operations at the secondary. Primary writes after + this point in time may or may not be available for reads. Required. + :vartype last_sync_time: ~datetime.datetime + """ + + _validation = { + "status": {"required": True}, + "last_sync_time": {"required": True}, + } + + _attribute_map = { + "status": {"key": "Status", "type": "str"}, + "last_sync_time": {"key": "LastSyncTime", "type": "rfc-1123"}, + } + + def __init__( + self, *, status: Union[str, "_models.GeoReplicationStatusType"], last_sync_time: datetime.datetime, **kwargs + ): + """ + :keyword status: The status of the secondary location. Required. Known values are: "live", + "bootstrap", and "unavailable". + :paramtype status: str or ~azure.storage.blob.models.GeoReplicationStatusType + :keyword last_sync_time: A GMT date/time value, to the second. All primary writes preceding + this value are guaranteed to be available for read operations at the secondary. Primary writes + after this point in time may or may not be available for reads. Required. + :paramtype last_sync_time: ~datetime.datetime + """ + super().__init__(**kwargs) + self.status = status + self.last_sync_time = last_sync_time + + +class JsonTextConfiguration(_serialization.Model): + """json text configuration. + + :ivar record_separator: The string used to separate records. + :vartype record_separator: str + """ + + _attribute_map = { + "record_separator": {"key": "RecordSeparator", "type": "str", "xml": {"name": "RecordSeparator"}}, + } + _xml_map = {"name": "JsonTextConfiguration"} + + def __init__(self, *, record_separator: Optional[str] = None, **kwargs): + """ + :keyword record_separator: The string used to separate records. 
+ :paramtype record_separator: str + """ + super().__init__(**kwargs) + self.record_separator = record_separator + + +class KeyInfo(_serialization.Model): + """Key information. + + All required parameters must be populated in order to send to Azure. + + :ivar start: The date-time the key is active in ISO 8601 UTC time. Required. + :vartype start: str + :ivar expiry: The date-time the key expires in ISO 8601 UTC time. Required. + :vartype expiry: str + """ + + _validation = { + "start": {"required": True}, + "expiry": {"required": True}, + } + + _attribute_map = { + "start": {"key": "Start", "type": "str"}, + "expiry": {"key": "Expiry", "type": "str"}, + } + + def __init__(self, *, start: str, expiry: str, **kwargs): + """ + :keyword start: The date-time the key is active in ISO 8601 UTC time. Required. + :paramtype start: str + :keyword expiry: The date-time the key expires in ISO 8601 UTC time. Required. + :paramtype expiry: str + """ + super().__init__(**kwargs) + self.start = start + self.expiry = expiry + + +class LeaseAccessConditions(_serialization.Model): + """Parameter group. + + :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and + matches this ID. + :vartype lease_id: str + """ + + _attribute_map = { + "lease_id": {"key": "leaseId", "type": "str"}, + } + + def __init__(self, *, lease_id: Optional[str] = None, **kwargs): + """ + :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :paramtype lease_id: str + """ + super().__init__(**kwargs) + self.lease_id = lease_id + + +class ListBlobsFlatSegmentResponse(_serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar container_name: Required. + :vartype container_name: str + :ivar prefix: + :vartype prefix: str + :ivar marker: + :vartype marker: str + :ivar max_results: + :vartype max_results: int + :ivar segment: Required. + :vartype segment: ~azure.storage.blob.models.BlobFlatListSegment + :ivar next_marker: + :vartype next_marker: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "container_name": {"required": True}, + "segment": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}}, + "prefix": {"key": "Prefix", "type": "str"}, + "marker": {"key": "Marker", "type": "str"}, + "max_results": {"key": "MaxResults", "type": "int"}, + "segment": {"key": "Segment", "type": "BlobFlatListSegment"}, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "_models.BlobFlatListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword container_name: Required. + :paramtype container_name: str + :keyword prefix: + :paramtype prefix: str + :keyword marker: + :paramtype marker: str + :keyword max_results: + :paramtype max_results: int + :keyword segment: Required. 
+ :paramtype segment: ~azure.storage.blob.models.BlobFlatListSegment + :keyword next_marker: + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + + +class ListBlobsHierarchySegmentResponse(_serialization.Model): + """An enumeration of blobs. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar container_name: Required. + :vartype container_name: str + :ivar prefix: + :vartype prefix: str + :ivar marker: + :vartype marker: str + :ivar max_results: + :vartype max_results: int + :ivar delimiter: + :vartype delimiter: str + :ivar segment: Required. + :vartype segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :ivar next_marker: + :vartype next_marker: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "container_name": {"required": True}, + "segment": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "container_name": {"key": "ContainerName", "type": "str", "xml": {"attr": True}}, + "prefix": {"key": "Prefix", "type": "str"}, + "marker": {"key": "Marker", "type": "str"}, + "max_results": {"key": "MaxResults", "type": "int"}, + "delimiter": {"key": "Delimiter", "type": "str"}, + "segment": {"key": "Segment", "type": "BlobHierarchyListSegment"}, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + container_name: str, + segment: "_models.BlobHierarchyListSegment", + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + delimiter: Optional[str] = None, + next_marker: Optional[str] = None, + **kwargs + ): + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword container_name: Required. + :paramtype container_name: str + :keyword prefix: + :paramtype prefix: str + :keyword marker: + :paramtype marker: str + :keyword max_results: + :paramtype max_results: int + :keyword delimiter: + :paramtype delimiter: str + :keyword segment: Required. + :paramtype segment: ~azure.storage.blob.models.BlobHierarchyListSegment + :keyword next_marker: + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.container_name = container_name + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.delimiter = delimiter + self.segment = segment + self.next_marker = next_marker + + +class ListContainersSegmentResponse(_serialization.Model): + """An enumeration of containers. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar prefix: + :vartype prefix: str + :ivar marker: + :vartype marker: str + :ivar max_results: + :vartype max_results: int + :ivar container_items: Required. 
+ :vartype container_items: list[~azure.storage.blob.models.ContainerItem] + :ivar next_marker: + :vartype next_marker: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "container_items": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "prefix": {"key": "Prefix", "type": "str"}, + "marker": {"key": "Marker", "type": "str"}, + "max_results": {"key": "MaxResults", "type": "int"}, + "container_items": { + "key": "ContainerItems", + "type": "[ContainerItem]", + "xml": {"name": "Containers", "wrapped": True, "itemsName": "Container"}, + }, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + container_items: List["_models.ContainerItem"], + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + next_marker: Optional[str] = None, + **kwargs + ): + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword prefix: + :paramtype prefix: str + :keyword marker: + :paramtype marker: str + :keyword max_results: + :paramtype max_results: int + :keyword container_items: Required. + :paramtype container_items: list[~azure.storage.blob.models.ContainerItem] + :keyword next_marker: + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.container_items = container_items + self.next_marker = next_marker + + +class Logging(_serialization.Model): + """Azure Analytics Logging settings. + + All required parameters must be populated in order to send to Azure. + + :ivar version: The version of Storage Analytics to configure. Required. + :vartype version: str + :ivar delete: Indicates whether all delete requests should be logged. Required. + :vartype delete: bool + :ivar read: Indicates whether all read requests should be logged. Required. + :vartype read: bool + :ivar write: Indicates whether all write requests should be logged. Required. + :vartype write: bool + :ivar retention_policy: the retention policy which determines how long the associated data + should persist. Required. + :vartype retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + "version": {"required": True}, + "delete": {"required": True}, + "read": {"required": True}, + "write": {"required": True}, + "retention_policy": {"required": True}, + } + + _attribute_map = { + "version": {"key": "Version", "type": "str"}, + "delete": {"key": "Delete", "type": "bool"}, + "read": {"key": "Read", "type": "bool"}, + "write": {"key": "Write", "type": "bool"}, + "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"}, + } + + def __init__( + self, + *, + version: str, + delete: bool, + read: bool, + write: bool, + retention_policy: "_models.RetentionPolicy", + **kwargs + ): + """ + :keyword version: The version of Storage Analytics to configure. Required. + :paramtype version: str + :keyword delete: Indicates whether all delete requests should be logged. Required. + :paramtype delete: bool + :keyword read: Indicates whether all read requests should be logged. Required. + :paramtype read: bool + :keyword write: Indicates whether all write requests should be logged. Required. 
+ :paramtype write: bool + :keyword retention_policy: the retention policy which determines how long the associated data + should persist. Required. + :paramtype retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + super().__init__(**kwargs) + self.version = version + self.delete = delete + self.read = read + self.write = write + self.retention_policy = retention_policy + + +class Metrics(_serialization.Model): + """a summary of request statistics grouped by API in hour or minute aggregates for blobs. + + All required parameters must be populated in order to send to Azure. + + :ivar version: The version of Storage Analytics to configure. + :vartype version: str + :ivar enabled: Indicates whether metrics are enabled for the Blob service. Required. + :vartype enabled: bool + :ivar include_apis: Indicates whether metrics should generate summary statistics for called API + operations. + :vartype include_apis: bool + :ivar retention_policy: the retention policy which determines how long the associated data + should persist. + :vartype retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "version": {"key": "Version", "type": "str"}, + "enabled": {"key": "Enabled", "type": "bool"}, + "include_apis": {"key": "IncludeAPIs", "type": "bool"}, + "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"}, + } + + def __init__( + self, + *, + enabled: bool, + version: Optional[str] = None, + include_apis: Optional[bool] = None, + retention_policy: Optional["_models.RetentionPolicy"] = None, + **kwargs + ): + """ + :keyword version: The version of Storage Analytics to configure. + :paramtype version: str + :keyword enabled: Indicates whether metrics are enabled for the Blob service. Required. + :paramtype enabled: bool + :keyword include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :paramtype include_apis: bool + :keyword retention_policy: the retention policy which determines how long the associated data + should persist. + :paramtype retention_policy: ~azure.storage.blob.models.RetentionPolicy + """ + super().__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class ModifiedAccessConditions(_serialization.Model): + """Parameter group. + + :ivar if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :vartype if_modified_since: ~datetime.datetime + :ivar if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :vartype if_unmodified_since: ~datetime.datetime + :ivar if_match: Specify an ETag value to operate only on blobs with a matching value. + :vartype if_match: str + :ivar if_none_match: Specify an ETag value to operate only on blobs without a matching value. + :vartype if_none_match: str + :ivar if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a matching + value. 
+ :vartype if_tags: str + """ + + _attribute_map = { + "if_modified_since": {"key": "ifModifiedSince", "type": "rfc-1123"}, + "if_unmodified_since": {"key": "ifUnmodifiedSince", "type": "rfc-1123"}, + "if_match": {"key": "ifMatch", "type": "str"}, + "if_none_match": {"key": "ifNoneMatch", "type": "str"}, + "if_tags": {"key": "ifTags", "type": "str"}, + } + + def __init__( + self, + *, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + **kwargs + ): + """ + :keyword if_modified_since: Specify this header value to operate only on a blob if it has been + modified since the specified date/time. + :paramtype if_modified_since: ~datetime.datetime + :keyword if_unmodified_since: Specify this header value to operate only on a blob if it has not + been modified since the specified date/time. + :paramtype if_unmodified_since: ~datetime.datetime + :keyword if_match: Specify an ETag value to operate only on blobs with a matching value. + :paramtype if_match: str + :keyword if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :paramtype if_none_match: str + :keyword if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :paramtype if_tags: str + """ + super().__init__(**kwargs) + self.if_modified_since = if_modified_since + self.if_unmodified_since = if_unmodified_since + self.if_match = if_match + self.if_none_match = if_none_match + self.if_tags = if_tags + + +class PageList(_serialization.Model): + """the list of pages. + + :ivar page_range: + :vartype page_range: list[~azure.storage.blob.models.PageRange] + :ivar clear_range: + :vartype clear_range: list[~azure.storage.blob.models.ClearRange] + :ivar next_marker: + :vartype next_marker: str + """ + + _attribute_map = { + "page_range": {"key": "PageRange", "type": "[PageRange]", "xml": {"itemsName": "PageRange"}}, + "clear_range": {"key": "ClearRange", "type": "[ClearRange]", "xml": {"itemsName": "ClearRange"}}, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + + def __init__( + self, + *, + page_range: Optional[List["_models.PageRange"]] = None, + clear_range: Optional[List["_models.ClearRange"]] = None, + next_marker: Optional[str] = None, + **kwargs + ): + """ + :keyword page_range: + :paramtype page_range: list[~azure.storage.blob.models.PageRange] + :keyword clear_range: + :paramtype clear_range: list[~azure.storage.blob.models.ClearRange] + :keyword next_marker: + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.page_range = page_range + self.clear_range = clear_range + self.next_marker = next_marker + + +class PageRange(_serialization.Model): + """PageRange. + + All required parameters must be populated in order to send to Azure. + + :ivar start: Required. + :vartype start: int + :ivar end: Required. + :vartype end: int + """ + + _validation = { + "start": {"required": True}, + "end": {"required": True}, + } + + _attribute_map = { + "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}}, + "end": {"key": "End", "type": "int", "xml": {"name": "End"}}, + } + _xml_map = {"name": "PageRange"} + + def __init__(self, *, start: int, end: int, **kwargs): + """ + :keyword start: Required. + :paramtype start: int + :keyword end: Required. 
+        :paramtype end: int
+        """
+        super().__init__(**kwargs)
+        self.start = start
+        self.end = end
+
+
+class QueryFormat(_serialization.Model):
+    """QueryFormat.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar type: The quick query format type. Required. Known values are: "delimited", "json",
+     "arrow", and "parquet".
+    :vartype type: str or ~azure.storage.blob.models.QueryFormatType
+    :ivar delimited_text_configuration: Groups the settings used for interpreting the blob data if
+     the blob is delimited text formatted.
+    :vartype delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration
+    :ivar json_text_configuration: JSON text configuration.
+    :vartype json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration
+    :ivar arrow_configuration: Groups the settings used for formatting the response if the response
+     should be Arrow formatted.
+    :vartype arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+    :ivar parquet_text_configuration: Parquet configuration.
+    :vartype parquet_text_configuration: JSON
+    """
+
+    _validation = {
+        "type": {"required": True},
+    }
+
+    _attribute_map = {
+        "type": {"key": "Type", "type": "str", "xml": {"name": "Type"}},
+        "delimited_text_configuration": {"key": "DelimitedTextConfiguration", "type": "DelimitedTextConfiguration"},
+        "json_text_configuration": {"key": "JsonTextConfiguration", "type": "JsonTextConfiguration"},
+        "arrow_configuration": {"key": "ArrowConfiguration", "type": "ArrowConfiguration"},
+        "parquet_text_configuration": {"key": "ParquetTextConfiguration", "type": "object"},
+    }
+
+    def __init__(
+        self,
+        *,
+        type: Union[str, "_models.QueryFormatType"],
+        delimited_text_configuration: Optional["_models.DelimitedTextConfiguration"] = None,
+        json_text_configuration: Optional["_models.JsonTextConfiguration"] = None,
+        arrow_configuration: Optional["_models.ArrowConfiguration"] = None,
+        parquet_text_configuration: Optional[JSON] = None,
+        **kwargs
+    ):
+        """
+        :keyword type: The quick query format type. Required. Known values are: "delimited", "json",
+         "arrow", and "parquet".
+        :paramtype type: str or ~azure.storage.blob.models.QueryFormatType
+        :keyword delimited_text_configuration: Groups the settings used for interpreting the blob data
+         if the blob is delimited text formatted.
+        :paramtype delimited_text_configuration: ~azure.storage.blob.models.DelimitedTextConfiguration
+        :keyword json_text_configuration: JSON text configuration.
+        :paramtype json_text_configuration: ~azure.storage.blob.models.JsonTextConfiguration
+        :keyword arrow_configuration: Groups the settings used for formatting the response if the
+         response should be Arrow formatted.
+        :paramtype arrow_configuration: ~azure.storage.blob.models.ArrowConfiguration
+        :keyword parquet_text_configuration: Parquet configuration.
+        :paramtype parquet_text_configuration: JSON
+        """
+        super().__init__(**kwargs)
+        self.type = type
+        self.delimited_text_configuration = delimited_text_configuration
+        self.json_text_configuration = json_text_configuration
+        self.arrow_configuration = arrow_configuration
+        self.parquet_text_configuration = parquet_text_configuration
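# A minimal sketch (hypothetical values, for illustration only): describing a CSV
# input for the quick-query models above. QueryFormat selects the format type and
# carries the matching per-format configuration object.
_example_csv_format = QueryFormat(
    type="delimited",
    delimited_text_configuration=DelimitedTextConfiguration(
        column_separator=",",
        field_quote='"',
        record_separator="\n",
        escape_char="\\",
        headers_present=True,  # first record is a header row
    ),
)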
+
+
+class QueryRequest(_serialization.Model):
+    """Groups the set of query request settings.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar query_type: The type of the provided query expression. Required. Default value is
+     "SQL".
+    :vartype query_type: str
+    :ivar expression: The query expression in SQL. The maximum size of the query expression is
+     256KiB. Required.
+    :vartype expression: str
+    :ivar input_serialization:
+    :vartype input_serialization: ~azure.storage.blob.models.QuerySerialization
+    :ivar output_serialization:
+    :vartype output_serialization: ~azure.storage.blob.models.QuerySerialization
+    """
+
+    _validation = {
+        "query_type": {"required": True, "constant": True},
+        "expression": {"required": True},
+    }
+
+    _attribute_map = {
+        "query_type": {"key": "QueryType", "type": "str", "xml": {"name": "QueryType"}},
+        "expression": {"key": "Expression", "type": "str", "xml": {"name": "Expression"}},
+        "input_serialization": {"key": "InputSerialization", "type": "QuerySerialization"},
+        "output_serialization": {"key": "OutputSerialization", "type": "QuerySerialization"},
+    }
+    _xml_map = {"name": "QueryRequest"}
+
+    query_type = "SQL"
+
+    def __init__(
+        self,
+        *,
+        expression: str,
+        input_serialization: Optional["_models.QuerySerialization"] = None,
+        output_serialization: Optional["_models.QuerySerialization"] = None,
+        **kwargs
+    ):
+        """
+        :keyword expression: The query expression in SQL. The maximum size of the query expression is
+         256KiB. Required.
+        :paramtype expression: str
+        :keyword input_serialization:
+        :paramtype input_serialization: ~azure.storage.blob.models.QuerySerialization
+        :keyword output_serialization:
+        :paramtype output_serialization: ~azure.storage.blob.models.QuerySerialization
+        """
+        super().__init__(**kwargs)
+        self.expression = expression
+        self.input_serialization = input_serialization
+        self.output_serialization = output_serialization
+
+
+class QuerySerialization(_serialization.Model):
+    """QuerySerialization.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar format: Required.
+    :vartype format: ~azure.storage.blob.models.QueryFormat
+    """
+
+    _validation = {
+        "format": {"required": True},
+    }
+
+    _attribute_map = {
+        "format": {"key": "Format", "type": "QueryFormat"},
+    }
+
+    def __init__(self, *, format: "_models.QueryFormat", **kwargs):
+        """
+        :keyword format: Required.
+        :paramtype format: ~azure.storage.blob.models.QueryFormat
+        """
+        super().__init__(**kwargs)
+        self.format = format
+
+
+class RetentionPolicy(_serialization.Model):
+    """The retention policy which determines how long the associated data should persist.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar enabled: Indicates whether a retention policy is enabled for the storage service.
+     Required.
+    :vartype enabled: bool
+    :ivar days: Indicates the number of days that metrics or logging or soft-deleted data should be
+     retained. All data older than this value will be deleted.
+    :vartype days: int
+    :ivar allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+     account.
+    :vartype allow_permanent_delete: bool
+    """
+
+    _validation = {
+        "enabled": {"required": True},
+        "days": {"minimum": 1},
+    }
+
+    _attribute_map = {
+        "enabled": {"key": "Enabled", "type": "bool"},
+        "days": {"key": "Days", "type": "int"},
+        "allow_permanent_delete": {"key": "AllowPermanentDelete", "type": "bool"},
+    }
+
+    def __init__(
+        self, *, enabled: bool, days: Optional[int] = None, allow_permanent_delete: Optional[bool] = None, **kwargs
+    ):
+        """
+        :keyword enabled: Indicates whether a retention policy is enabled for the storage service.
+         Required.
+        :paramtype enabled: bool
+        :keyword days: Indicates the number of days that metrics or logging or soft-deleted data should
+         be retained. All data older than this value will be deleted.
+        :paramtype days: int
+        :keyword allow_permanent_delete: Indicates whether permanent delete is allowed on this storage
+         account.
+        :paramtype allow_permanent_delete: bool
+        """
+        super().__init__(**kwargs)
+        self.enabled = enabled
+        self.days = days
+        self.allow_permanent_delete = allow_permanent_delete
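# A minimal sketch (hypothetical values, for illustration only): a policy that keeps
# soft-deleted data for seven days. The generated _validation map above marks
# `enabled` as required and constrains `days` to a minimum of 1.
_example_retention = RetentionPolicy(enabled=True, days=7)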
+
+
+class SequenceNumberAccessConditions(_serialization.Model):
+    """Parameter group.
+
+    :ivar if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on a
+     blob if it has a sequence number less than or equal to the specified value.
+    :vartype if_sequence_number_less_than_or_equal_to: int
+    :ivar if_sequence_number_less_than: Specify this header value to operate only on a blob if it
+     has a sequence number less than the specified value.
+    :vartype if_sequence_number_less_than: int
+    :ivar if_sequence_number_equal_to: Specify this header value to operate only on a blob if it
+     has the specified sequence number.
+    :vartype if_sequence_number_equal_to: int
+    """
+
+    _attribute_map = {
+        "if_sequence_number_less_than_or_equal_to": {"key": "ifSequenceNumberLessThanOrEqualTo", "type": "int"},
+        "if_sequence_number_less_than": {"key": "ifSequenceNumberLessThan", "type": "int"},
+        "if_sequence_number_equal_to": {"key": "ifSequenceNumberEqualTo", "type": "int"},
+    }
+
+    def __init__(
+        self,
+        *,
+        if_sequence_number_less_than_or_equal_to: Optional[int] = None,
+        if_sequence_number_less_than: Optional[int] = None,
+        if_sequence_number_equal_to: Optional[int] = None,
+        **kwargs
+    ):
+        """
+        :keyword if_sequence_number_less_than_or_equal_to: Specify this header value to operate only on
+         a blob if it has a sequence number less than or equal to the specified value.
+        :paramtype if_sequence_number_less_than_or_equal_to: int
+        :keyword if_sequence_number_less_than: Specify this header value to operate only on a blob if
+         it has a sequence number less than the specified value.
+        :paramtype if_sequence_number_less_than: int
+        :keyword if_sequence_number_equal_to: Specify this header value to operate only on a blob if it
+         has the specified sequence number.
+        :paramtype if_sequence_number_equal_to: int
+        """
+        super().__init__(**kwargs)
+        self.if_sequence_number_less_than_or_equal_to = if_sequence_number_less_than_or_equal_to
+        self.if_sequence_number_less_than = if_sequence_number_less_than
+        self.if_sequence_number_equal_to = if_sequence_number_equal_to
+
+
+class SignedIdentifier(_serialization.Model):
+    """Signed identifier.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar id: A unique id. Required.
+    :vartype id: str
+    :ivar access_policy: An Access policy.
+    :vartype access_policy: ~azure.storage.blob.models.AccessPolicy
+    """
+
+    _validation = {
+        "id": {"required": True},
+    }
+
+    _attribute_map = {
+        "id": {"key": "Id", "type": "str"},
+        "access_policy": {"key": "AccessPolicy", "type": "AccessPolicy"},
+    }
+    _xml_map = {"name": "SignedIdentifier"}
+
+    def __init__(
+        self,
+        *,
+        id: str,  # pylint: disable=redefined-builtin
+        access_policy: Optional["_models.AccessPolicy"] = None,
+        **kwargs
+    ):
+        """
+        :keyword id: A unique id. Required.
+        :paramtype id: str
+        :keyword access_policy: An Access policy.
+ :paramtype access_policy: ~azure.storage.blob.models.AccessPolicy + """ + super().__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SourceModifiedAccessConditions(_serialization.Model): + """Parameter group. + + :ivar source_if_modified_since: Specify this header value to operate only on a blob if it has + been modified since the specified date/time. + :vartype source_if_modified_since: ~datetime.datetime + :ivar source_if_unmodified_since: Specify this header value to operate only on a blob if it has + not been modified since the specified date/time. + :vartype source_if_unmodified_since: ~datetime.datetime + :ivar source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :vartype source_if_match: str + :ivar source_if_none_match: Specify an ETag value to operate only on blobs without a matching + value. + :vartype source_if_none_match: str + :ivar source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with a + matching value. + :vartype source_if_tags: str + """ + + _attribute_map = { + "source_if_modified_since": {"key": "sourceIfModifiedSince", "type": "rfc-1123"}, + "source_if_unmodified_since": {"key": "sourceIfUnmodifiedSince", "type": "rfc-1123"}, + "source_if_match": {"key": "sourceIfMatch", "type": "str"}, + "source_if_none_match": {"key": "sourceIfNoneMatch", "type": "str"}, + "source_if_tags": {"key": "sourceIfTags", "type": "str"}, + } + + def __init__( + self, + *, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + source_if_tags: Optional[str] = None, + **kwargs + ): + """ + :keyword source_if_modified_since: Specify this header value to operate only on a blob if it + has been modified since the specified date/time. + :paramtype source_if_modified_since: ~datetime.datetime + :keyword source_if_unmodified_since: Specify this header value to operate only on a blob if it + has not been modified since the specified date/time. + :paramtype source_if_unmodified_since: ~datetime.datetime + :keyword source_if_match: Specify an ETag value to operate only on blobs with a matching value. + :paramtype source_if_match: str + :keyword source_if_none_match: Specify an ETag value to operate only on blobs without a + matching value. + :paramtype source_if_none_match: str + :keyword source_if_tags: Specify a SQL where clause on blob tags to operate only on blobs with + a matching value. + :paramtype source_if_tags: str + """ + super().__init__(**kwargs) + self.source_if_modified_since = source_if_modified_since + self.source_if_unmodified_since = source_if_unmodified_since + self.source_if_match = source_if_match + self.source_if_none_match = source_if_none_match + self.source_if_tags = source_if_tags + + +class StaticWebsite(_serialization.Model): + """The properties that enable an account to host a static website. + + All required parameters must be populated in order to send to Azure. + + :ivar enabled: Indicates whether this account is hosting a static website. Required. + :vartype enabled: bool + :ivar index_document: The default name of the index page under each directory. + :vartype index_document: str + :ivar error_document404_path: The absolute path of the custom 404 page. + :vartype error_document404_path: str + :ivar default_index_document_path: Absolute path of the default index page. 
+ :vartype default_index_document_path: str + """ + + _validation = { + "enabled": {"required": True}, + } + + _attribute_map = { + "enabled": {"key": "Enabled", "type": "bool"}, + "index_document": {"key": "IndexDocument", "type": "str"}, + "error_document404_path": {"key": "ErrorDocument404Path", "type": "str"}, + "default_index_document_path": {"key": "DefaultIndexDocumentPath", "type": "str"}, + } + + def __init__( + self, + *, + enabled: bool, + index_document: Optional[str] = None, + error_document404_path: Optional[str] = None, + default_index_document_path: Optional[str] = None, + **kwargs + ): + """ + :keyword enabled: Indicates whether this account is hosting a static website. Required. + :paramtype enabled: bool + :keyword index_document: The default name of the index page under each directory. + :paramtype index_document: str + :keyword error_document404_path: The absolute path of the custom 404 page. + :paramtype error_document404_path: str + :keyword default_index_document_path: Absolute path of the default index page. + :paramtype default_index_document_path: str + """ + super().__init__(**kwargs) + self.enabled = enabled + self.index_document = index_document + self.error_document404_path = error_document404_path + self.default_index_document_path = default_index_document_path + + +class StorageError(_serialization.Model): + """StorageError. + + :ivar message: + :vartype message: str + """ + + _attribute_map = { + "message": {"key": "Message", "type": "str"}, + } + + def __init__(self, *, message: Optional[str] = None, **kwargs): + """ + :keyword message: + :paramtype message: str + """ + super().__init__(**kwargs) + self.message = message + + +class StorageServiceProperties(_serialization.Model): + """Storage Service Properties. + + :ivar logging: Azure Analytics Logging settings. + :vartype logging: ~azure.storage.blob.models.Logging + :ivar hour_metrics: a summary of request statistics grouped by API in hour or minute aggregates + for blobs. + :vartype hour_metrics: ~azure.storage.blob.models.Metrics + :ivar minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :vartype minute_metrics: ~azure.storage.blob.models.Metrics + :ivar cors: The set of CORS rules. + :vartype cors: list[~azure.storage.blob.models.CorsRule] + :ivar default_service_version: The default version to use for requests to the Blob service if + an incoming request's version is not specified. Possible values include version 2008-10-27 and + all more recent versions. + :vartype default_service_version: str + :ivar delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :vartype delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :ivar static_website: The properties that enable an account to host a static website. 
+ :vartype static_website: ~azure.storage.blob.models.StaticWebsite + """ + + _attribute_map = { + "logging": {"key": "Logging", "type": "Logging"}, + "hour_metrics": {"key": "HourMetrics", "type": "Metrics"}, + "minute_metrics": {"key": "MinuteMetrics", "type": "Metrics"}, + "cors": {"key": "Cors", "type": "[CorsRule]", "xml": {"wrapped": True}}, + "default_service_version": {"key": "DefaultServiceVersion", "type": "str"}, + "delete_retention_policy": {"key": "DeleteRetentionPolicy", "type": "RetentionPolicy"}, + "static_website": {"key": "StaticWebsite", "type": "StaticWebsite"}, + } + + def __init__( + self, + *, + logging: Optional["_models.Logging"] = None, + hour_metrics: Optional["_models.Metrics"] = None, + minute_metrics: Optional["_models.Metrics"] = None, + cors: Optional[List["_models.CorsRule"]] = None, + default_service_version: Optional[str] = None, + delete_retention_policy: Optional["_models.RetentionPolicy"] = None, + static_website: Optional["_models.StaticWebsite"] = None, + **kwargs + ): + """ + :keyword logging: Azure Analytics Logging settings. + :paramtype logging: ~azure.storage.blob.models.Logging + :keyword hour_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :paramtype hour_metrics: ~azure.storage.blob.models.Metrics + :keyword minute_metrics: a summary of request statistics grouped by API in hour or minute + aggregates for blobs. + :paramtype minute_metrics: ~azure.storage.blob.models.Metrics + :keyword cors: The set of CORS rules. + :paramtype cors: list[~azure.storage.blob.models.CorsRule] + :keyword default_service_version: The default version to use for requests to the Blob service + if an incoming request's version is not specified. Possible values include version 2008-10-27 + and all more recent versions. + :paramtype default_service_version: str + :keyword delete_retention_policy: the retention policy which determines how long the associated + data should persist. + :paramtype delete_retention_policy: ~azure.storage.blob.models.RetentionPolicy + :keyword static_website: The properties that enable an account to host a static website. + :paramtype static_website: ~azure.storage.blob.models.StaticWebsite + """ + super().__init__(**kwargs) + self.logging = logging + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.default_service_version = default_service_version + self.delete_retention_policy = delete_retention_policy + self.static_website = static_website + + +class StorageServiceStats(_serialization.Model): + """Stats for the storage service. + + :ivar geo_replication: Geo-Replication information for the Secondary Storage Service. + :vartype geo_replication: ~azure.storage.blob.models.GeoReplication + """ + + _attribute_map = { + "geo_replication": {"key": "GeoReplication", "type": "GeoReplication"}, + } + + def __init__(self, *, geo_replication: Optional["_models.GeoReplication"] = None, **kwargs): + """ + :keyword geo_replication: Geo-Replication information for the Secondary Storage Service. + :paramtype geo_replication: ~azure.storage.blob.models.GeoReplication + """ + super().__init__(**kwargs) + self.geo_replication = geo_replication + + +class UserDelegationKey(_serialization.Model): + """A user delegation key. + + All required parameters must be populated in order to send to Azure. + + :ivar signed_oid: The Azure Active Directory object ID in GUID format. Required. 
+ :vartype signed_oid: str + :ivar signed_tid: The Azure Active Directory tenant ID in GUID format. Required. + :vartype signed_tid: str + :ivar signed_start: The date-time the key is active. Required. + :vartype signed_start: ~datetime.datetime + :ivar signed_expiry: The date-time the key expires. Required. + :vartype signed_expiry: ~datetime.datetime + :ivar signed_service: Abbreviation of the Azure Storage service that accepts the key. Required. + :vartype signed_service: str + :ivar signed_version: The service version that created the key. Required. + :vartype signed_version: str + :ivar value: The key as a base64 string. Required. + :vartype value: str + """ + + _validation = { + "signed_oid": {"required": True}, + "signed_tid": {"required": True}, + "signed_start": {"required": True}, + "signed_expiry": {"required": True}, + "signed_service": {"required": True}, + "signed_version": {"required": True}, + "value": {"required": True}, + } + + _attribute_map = { + "signed_oid": {"key": "SignedOid", "type": "str"}, + "signed_tid": {"key": "SignedTid", "type": "str"}, + "signed_start": {"key": "SignedStart", "type": "iso-8601"}, + "signed_expiry": {"key": "SignedExpiry", "type": "iso-8601"}, + "signed_service": {"key": "SignedService", "type": "str"}, + "signed_version": {"key": "SignedVersion", "type": "str"}, + "value": {"key": "Value", "type": "str"}, + } + + def __init__( + self, + *, + signed_oid: str, + signed_tid: str, + signed_start: datetime.datetime, + signed_expiry: datetime.datetime, + signed_service: str, + signed_version: str, + value: str, + **kwargs + ): + """ + :keyword signed_oid: The Azure Active Directory object ID in GUID format. Required. + :paramtype signed_oid: str + :keyword signed_tid: The Azure Active Directory tenant ID in GUID format. Required. + :paramtype signed_tid: str + :keyword signed_start: The date-time the key is active. Required. + :paramtype signed_start: ~datetime.datetime + :keyword signed_expiry: The date-time the key expires. Required. + :paramtype signed_expiry: ~datetime.datetime + :keyword signed_service: Abbreviation of the Azure Storage service that accepts the key. + Required. + :paramtype signed_service: str + :keyword signed_version: The service version that created the key. Required. + :paramtype signed_version: str + :keyword value: The key as a base64 string. Required. + :paramtype value: str + """ + super().__init__(**kwargs) + self.signed_oid = signed_oid + self.signed_tid = signed_tid + self.signed_start = signed_start + self.signed_expiry = signed_expiry + self.signed_service = signed_service + self.signed_version = signed_version + self.value = value diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_patch.py new file mode 100644 index 00000000000..029b47fe478 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/models/_patch.py @@ -0,0 +1,23 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. 
+ +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import List +__all__ = [] # type: List[str] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/__init__.py new file mode 100644 index 00000000000..f8feb32687a --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/__init__.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._container_operations import ContainerOperations +from ._blob_operations import BlobOperations +from ._page_blob_operations import PageBlobOperations +from ._append_blob_operations import AppendBlobOperations +from ._block_blob_operations import BlockBlobOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ServiceOperations", + "ContainerOperations", + "BlobOperations", + "PageBlobOperations", + "AppendBlobOperations", + "BlockBlobOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_append_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_append_blob_operations.py new file mode 100644 index 00000000000..366954fef51 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_append_blob_operations.py @@ -0,0 +1,1095 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + content_length: int, + timeout: Optional[int] = None, + blob_content_type: Optional[str] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_cache_control: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + "blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if 
blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") + if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_append_block_request( + url: str, + *, + content_length: int, + content: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + lease_id: Optional[str] = None, + max_size: Optional[int] = None, + append_position: Optional[int] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = 
None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if max_size is not None: + _headers["x-ms-blob-condition-maxsize"] = _SERIALIZER.header("max_size", max_size, "int") + if append_position is not None: + _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, 
headers=_headers, content=content, **kwargs) + + +def build_append_block_from_url_request( + url: str, + *, + source_url: str, + content_length: int, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + lease_id: Optional[str] = None, + max_size: Optional[int] = None, + append_position: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str") + if source_range is not None: + _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str") + if source_content_md5 is not None: + _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray") + if source_contentcrc64 is not None: + _headers["x-ms-source-content-crc64"] = _SERIALIZER.header( + "source_contentcrc64", source_contentcrc64, "bytearray" + ) + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if max_size 
is not None: + _headers["x-ms-blob-condition-maxsize"] = _SERIALIZER.header("max_size", max_size, "int") + if append_position is not None: + _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_seal_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + append_position: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "seal")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if lease_id is not 
None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if append_position is not None: + _headers["x-ms-blob-condition-appendpos"] = _SERIALIZER.header("append_position", append_position, "int") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +class AppendBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`append_blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create Append Blob operation creates a new append blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. 
+ :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "AppendBlob". Note that overriding this default value may result in + unsupported behavior. 
+ :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "AppendBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_create_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + blob_type=blob_type, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, 
stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def append_block( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob. + The Append Block operation is permitted only if the blob was created with x-ms-blob-type set to + AppendBlob. Append Block is supported only on version 2015-02-21 version or later. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. 
+ :type transactional_content_crc64: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. Default value is None. + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "appendblock". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _max_size = None + _append_position = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + _max_size = append_position_access_conditions.max_size + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = body + + request = build_append_block_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + lease_id=_lease_id, + max_size=_max_size, + append_position=_append_position, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + 
encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.append_block.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-append-offset"] = self._deserialize( + "str", response.headers.get("x-ms-blob-append-offset") + ) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + append_block.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def append_block_from_url( # pylint: disable=inconsistent-return-statements + self, + source_url: str, + content_length: int, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + 
source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Append Block operation commits a new block of data to the end of an existing append blob + where the contents are read from a source url. The Append Block operation is permitted only if + the blob was created with x-ms-blob-type set to AppendBlob. Append Block is supported only on + version 2015-02-21 version or later. + + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param content_length: The length of the request. Required. + :type content_length: int + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param append_position_access_conditions: Parameter group. Default value is None. + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "appendblock". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "appendblock")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _max_size = None + _append_position = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + _max_size = append_position_access_conditions.max_size + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_append_block_from_url_request( + url=self._config.url, + source_url=source_url, + content_length=content_length, + source_range=source_range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + max_size=_max_size, + append_position=_append_position, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + version=self._config.version, + template_url=self.append_block_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = 
self._client.format_url(request.url)  # type: ignore
+
+        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            request, stream=False, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-blob-append-offset"] = self._deserialize(
+            "str", response.headers.get("x-ms-blob-append-offset")
+        )
+        response_headers["x-ms-blob-committed-block-count"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-committed-block-count")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    append_block_from_url.metadata = {"url": "{url}/{containerName}/{blob}"}  # type: ignore
+
+    @distributed_trace
+    def seal(  # pylint: disable=inconsistent-return-statements
+        self,
+        timeout: Optional[int] = None,
+        request_id_parameter: Optional[str] = None,
+        lease_access_conditions: Optional[_models.LeaseAccessConditions] = None,
+        modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None,
+        append_position_access_conditions: Optional[_models.AppendPositionAccessConditions] = None,
+        **kwargs: Any
+    ) -> None:
+        """The Seal operation seals the Append Blob to make it read-only. Seal is supported only on
+        version 2019-12-12 or later.
+
+        :param timeout: The timeout parameter is expressed in seconds. For more information, see
+         :code:`Setting Timeouts for Blob Service Operations`. Default value is None.
+        :type timeout: int
+        :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
+         limit that is recorded in the analytics logs when storage analytics logging is enabled.
+         Default value is None.
+        :type request_id_parameter: str
+        :param lease_access_conditions: Parameter group. Default value is None.
+        :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions
+        :param modified_access_conditions: Parameter group. Default value is None.
+        :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions
+        :param append_position_access_conditions: Parameter group. Default value is None.
+ :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :keyword comp: comp. Default value is "seal". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "seal")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _append_position = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if append_position_access_conditions is not None: + _append_position = append_position_access_conditions.append_position + + request = build_seal_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + append_position=_append_position, + comp=comp, + version=self._config.version, + template_url=self.seal.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + + if cls: + return cls(pipeline_response, None, response_headers) + + seal.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_blob_operations.py 
b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_blob_operations.py new file mode 100644 index 00000000000..bbe2f1427b0 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_blob_operations.py @@ -0,0 +1,4557 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, Iterator, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_download_request( + url: str, + *, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + range_get_content_crc64: Optional[bool] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if 
range_get_content_md5 is not None: + _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header( + "range_get_content_md5", range_get_content_md5, "bool" + ) + if range_get_content_crc64 is not None: + _headers["x-ms-range-get-content-crc64"] = _SERIALIZER.header( + "range_get_content_crc64", range_get_content_crc64, "bool" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_properties_request( + url: str, + *, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, 
"str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + url: str, + *, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_delete_type: str = "Permanent", + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if blob_delete_type is not None: + _params["deletetype"] = _SERIALIZER.query("blob_delete_type", blob_delete_type, "str") + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if delete_snapshots is not None: + _headers["x-ms-delete-snapshots"] = _SERIALIZER.header("delete_snapshots", delete_snapshots, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", 
if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_undelete_request( + url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_expiry_request( + url: str, + *, + expiry_options: Union[str, "_models.BlobExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "expiry")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["x-ms-expiry-option"] = _SERIALIZER.header("expiry_options", expiry_options, "str") + if expires_on is not None: + _headers["x-ms-expiry-time"] = _SERIALIZER.header("expires_on", expires_on, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + 
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_http_headers_request( + url: str, + *, + timeout: Optional[int] = None, + blob_cache_control: Optional[str] = None, + blob_content_type: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + "blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_immutability_policy_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_immutability_policy_request( + url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, 
params=_params, headers=_headers, **kwargs) + + +def build_set_legal_hold_request( + url: str, + *, + legal_hold: bool, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "legalhold")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_metadata_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + 
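+        # x-ms-encryption-algorithm accompanies a customer-provided key (CPK); when no CPK is
+        # supplied, the service uses its own account-level encryption, so all three headers are optional.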
_headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_acquire_lease_request( + url: str, + *, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if duration is not None: + _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + 
_headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_release_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_renew_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + version = 
kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_change_lease_request( + url: str, + *, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = 
_SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_break_lease_request( + url: str, + *, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if break_period is not None: + _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_snapshot_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: 
Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "snapshot")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_start_copy_from_url_request( + url: str, + *, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = 
None, + source_if_tags: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + seal_blob: Optional[bool] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if rehydrate_priority is not None: + _headers["x-ms-rehydrate-priority"] = _SERIALIZER.header("rehydrate_priority", rehydrate_priority, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + if source_if_tags is not None: + _headers["x-ms-source-if-tags"] = _SERIALIZER.header("source_if_tags", source_if_tags, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if blob_tags_string is not None: + _headers["x-ms-tags"] = 
_SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if seal_blob is not None: + _headers["x-ms-seal-blob"] = _SERIALIZER.header("seal_blob", seal_blob, "bool") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_copy_from_url_request( + url: str, + *, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + encryption_scope: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + x_ms_requires_sync = kwargs.pop("x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-requires-sync"] = _SERIALIZER.header("x_ms_requires_sync", x_ms_requires_sync, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + 
_headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if source_content_md5 is not None: + _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if copy_source_tags is not None: + _headers["x-ms-copy-source-tag-option"] = _SERIALIZER.header("copy_source_tags", copy_source_tags, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_abort_copy_from_url_request( + url: str, + *, + copy_id: str, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "copy")) # type: str + copy_action_abort_constant = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = 
_format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["copyid"] = _SERIALIZER.query("copy_id", copy_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-copy-action"] = _SERIALIZER.header("copy_action_abort_constant", copy_action_abort_constant, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_tier_request( + url: str, + *, + tier: Union[str, "_models.AccessTierRequired"], + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + lease_id: Optional[str] = None, + if_tags: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tier")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if rehydrate_priority is not None: + _headers["x-ms-rehydrate-priority"] = _SERIALIZER.header("rehydrate_priority", rehydrate_priority, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_account_info_request(url: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: 
str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_query_request( + url: str, + *, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + content: Any = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "query")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + 
_headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_tags_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + if_tags: Optional[str] = None, + lease_id: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_tags_request( + url: str, + *, + timeout: Optional[int] = None, + version_id: Optional[str] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + if_tags: Optional[str] = None, + lease_id: Optional[str] = None, + content: Any = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = 
_format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if version_id is not None: + _params["versionid"] = _SERIALIZER.query("version_id", version_id, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +class BlobOperations: # pylint: disable=too-many-public-methods + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def download( + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + range_get_content_crc64: Optional[bool] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """The Download operation reads or downloads a blob from the system, including its metadata and + properties. You can also call Download to read a snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param range_get_content_md5: When set to true and specified together with the Range, the + service returns the MD5 hash for the range, as long as the range is less than or equal to 4 MB + in size. Default value is None. + :type range_get_content_md5: bool + :param range_get_content_crc64: When set to true and specified together with the Range, the + service returns the CRC64 hash for the range, as long as the range is less than or equal to 4 + MB in size. Default value is None. + :type range_get_content_crc64: bool + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Iterator of the response bytes or the result of cls(response) + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[Iterator[bytes]] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_download_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + range=range, + lease_id=_lease_id, + range_get_content_md5=range_get_content_md5, + range_get_content_crc64=range_get_content_crc64, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + version=self._config.version, + template_url=self.download.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: 
ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = 
self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", 
response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + deserialized = 
response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + download.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_properties( # pylint: disable=inconsistent-return-statements + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. 
+ :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_properties_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-creation-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-creation-time") + ) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-or-policy-id"] = self._deserialize("str", response.headers.get("x-ms-or-policy-id")) + response_headers["x-ms-or"] = self._deserialize("{str}", response.headers.get("x-ms-or")) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") 
+ ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize("str", response.headers.get("x-ms-copy-progress")) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-incremental-copy"] = self._deserialize( + "bool", response.headers.get("x-ms-incremental-copy") + ) + response_headers["x-ms-copy-destination-snapshot"] = self._deserialize( + "str", response.headers.get("x-ms-copy-destination-snapshot") + ) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition")) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier")) + response_headers["x-ms-access-tier-inferred"] = self._deserialize( + "bool", response.headers.get("x-ms-access-tier-inferred") + ) + response_headers["x-ms-archive-status"] = self._deserialize("str", response.headers.get("x-ms-archive-status")) + response_headers["x-ms-access-tier-change-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-access-tier-change-time") + ) + 
response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["x-ms-is-current-version"] = self._deserialize( + "bool", response.headers.get("x-ms-is-current-version") + ) + response_headers["x-ms-tag-count"] = self._deserialize("int", response.headers.get("x-ms-tag-count")) + response_headers["x-ms-expiry-time"] = self._deserialize("rfc-1123", response.headers.get("x-ms-expiry-time")) + response_headers["x-ms-blob-sealed"] = self._deserialize("bool", response.headers.get("x-ms-blob-sealed")) + response_headers["x-ms-rehydrate-priority"] = self._deserialize( + "str", response.headers.get("x-ms-rehydrate-priority") + ) + response_headers["x-ms-last-access-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-last-access-time") + ) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]] = None, + request_id_parameter: Optional[str] = None, + blob_delete_type: str = "Permanent", + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is + permanently removed from the storage account. If the storage account's soft delete feature is + enabled, then, when a blob is deleted, it is marked for deletion and becomes inaccessible + immediately. However, the blob service retains the blob or snapshot for the number of days + specified by the DeleteRetentionPolicy section of [Storage service properties] + (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's + data is permanently removed from the storage account. Note that you continue to be charged for + the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and + specify the "include=deleted" query parameter to discover which blobs and snapshots have been + soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other + operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code + of 404 (ResourceNotFound). + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. 
+ :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param delete_snapshots: Required if the blob has associated snapshots. Specify one of the + following two options: include: Delete the base blob and all of its snapshots. only: Delete + only the blob's snapshots and not the blob itself. Known values are: "include" and "only". + Default value is None. + :type delete_snapshots: str or ~azure.storage.blob.models.DeleteSnapshotsOptionType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_delete_type: Optional. Only possible value is 'permanent', which specifies to + permanently delete a blob if blob soft delete is enabled. Known values are "Permanent" and + None. Default value is "Permanent". + :type blob_delete_type: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_delete_request( + url=self._config.url, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + lease_id=_lease_id, + delete_snapshots=delete_snapshots, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_delete_type=blob_delete_type, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, 
pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def undelete( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """Undelete a blob that was previously soft deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_undelete_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.undelete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + undelete.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + 
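+ # A minimal usage sketch, assuming the vendored package keeps the public azure-storage-blob 12.x + # client surface (BlobClient.from_blob_url / download_blob / undelete_blob); "<blob-sas-url>" is a + # placeholder, not a real endpoint. Callers normally drive the download/undelete operations above + # through that hand-written BlobClient layer rather than invoking BlobOperations directly: + # + #     from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import BlobClient + # + #     client = BlobClient.from_blob_url("<blob-sas-url>")  # SAS token carried in the URL + #     data = client.download_blob().readall()  # routed through BlobOperations.download + #     client.undelete_blob()                   # routed through BlobOperations.undelete +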
@distributed_trace + def set_expiry( # pylint: disable=inconsistent-return-statements + self, + expiry_options: Union[str, "_models.BlobExpiryOptions"], + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + expires_on: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. Known values are: + "NeverExpire", "RelativeToCreation", "RelativeToNow", and "Absolute". Required. + :type expiry_options: str or ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param expires_on: The time to set the blob to expiry. Default value is None. + :type expires_on: str + :keyword comp: comp. Default value is "expiry". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "expiry")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_set_expiry_request( + url=self._config.url, + expiry_options=expiry_options, + timeout=timeout, + request_id_parameter=request_id_parameter, + expires_on=expires_on, + comp=comp, + version=self._config.version, + template_url=self.set_expiry.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_expiry.metadata = {"url": 
"{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_http_headers( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set HTTP Headers operation sets system properties on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_cache_control = None + _blob_content_type = None + _blob_content_md5 = None + _blob_content_encoding = None + _blob_content_language = None + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _blob_content_disposition = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_http_headers_request( + url=self._config.url, + timeout=timeout, + blob_cache_control=_blob_cache_control, + 
blob_content_type=_blob_content_type, + blob_content_md5=_blob_content_md5, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_content_disposition=_blob_content_disposition, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_http_headers.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_immutability_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Immutability Policy operation sets the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param immutability_policy_expiry: Specifies the date time when the blob's immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None.
+ :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "immutabilityPolicies". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_immutability_policy_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + if_unmodified_since=_if_unmodified_since, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + comp=comp, + version=self._config.version, + template_url=self.set_immutability_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-immutability-policy-until-date"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-immutability-policy-until-date") + ) + response_headers["x-ms-immutability-policy-mode"] = self._deserialize( + "str", response.headers.get("x-ms-immutability-policy-mode") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_immutability_policy.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def delete_immutability_policy( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "immutabilityPolicies". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "immutabilityPolicies")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_delete_immutability_policy_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.delete_immutability_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete_immutability_policy.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_legal_hold( # pylint: disable=inconsistent-return-statements + self, legal_hold: bool, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """The Set Legal Hold operation sets a legal hold on the blob. + + :param legal_hold: Specifies whether a legal hold should be set on the blob. Required. + :type legal_hold: bool + :param timeout: The timeout parameter is expressed in seconds. For more information, see + Setting Timeouts for Blob Service Operations. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "legalhold".
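# --- Illustrative usage sketch for the immutability operations defined here
# (same hypothetical `blob_ops` instance as in the sketch above):
# set_immutability_policy locks the blob until an expiry time,
# delete_immutability_policy removes a policy that is not in the "Locked"
# mode, and set_legal_hold toggles an indefinite hold.
import datetime

blob_ops.set_immutability_policy(
    immutability_policy_expiry=datetime.datetime(
        2031, 1, 1, tzinfo=datetime.timezone.utc
    ),
    immutability_policy_mode="Unlocked",  # "Mutable", "Unlocked" or "Locked"
)
blob_ops.delete_immutability_policy()
blob_ops.set_legal_hold(legal_hold=True)   # place the hold
blob_ops.set_legal_hold(legal_hold=False)  # clear it again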
Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "legalhold")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_set_legal_hold_request( + url=self._config.url, + legal_hold=legal_hold, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_legal_hold.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-legal-hold")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_legal_hold.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or + more name-value pairs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = 
self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. 
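# --- Illustrative usage sketch for set_metadata above (hypothetical
# `blob_ops`/`_models` as before; the lease id is a placeholder). Set Blob
# Metadata replaces the blob's whole metadata set rather than merging with
# existing pairs.
blob_ops.set_metadata(
    metadata={"project": "aosm", "stage": "draft"},
    lease_access_conditions=_models.LeaseAccessConditions(
        lease_id="00000000-0000-0000-0000-000000000000"
    ),
)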
+ :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def release_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, 
+ modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = 
self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and 
delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Required. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + proposed_lease_id=proposed_lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise 
HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
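# --- Illustrative usage sketch: keeping the lease from the previous sketch
# alive with renew_lease, then atomically swapping in a caller-chosen id with
# change_lease (the proposed GUID is a placeholder).
blob_ops.renew_lease(lease_id=lease_id)
new_lease_id = "11111111-2222-3333-4444-555555555555"
blob_ops.change_lease(lease_id=lease_id, proposed_lease_id=new_lease_id)
lease_id = new_lease_id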
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def create_snapshot( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + 
lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create Snapshot operation creates a read-only snapshot of a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + Setting Timeouts for Blob Service Operations. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "snapshot". Note that overriding this default value may + result in unsupported behavior.
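# --- Illustrative usage sketch for break_lease above: no lease id is needed,
# and break_period proposes how many seconds (0-60) the lease may persist
# before breaking. The remaining time comes back in the x-ms-lease-time
# response header. (Hypothetical `blob_ops` as before.)
resp_headers = blob_ops.break_lease(
    break_period=0,  # break immediately
    cls=lambda pipeline_response, deserialized, headers: headers,
)
seconds_remaining = resp_headers["x-ms-lease-time"]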
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "snapshot")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_create_snapshot_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.create_snapshot.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", 
response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def start_copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + seal_blob: Optional[bool] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Start Copy From URL operation copies a blob or an internet resource to a new blob. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. Known values are: "High" and "Standard". Default value is None. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. 
Default + value is None. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. Service version + 2019-12-12 and newer. Default value is None. + :type seal_blob: bool + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_start_copy_from_url_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + tier=tier, + rehydrate_priority=rehydrate_priority, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + source_if_tags=_source_if_tags, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + 
if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + seal_blob=seal_blob, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + version=self._config.version, + template_url=self.start_copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + start_copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not + return a response until the copy is complete. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. 
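# --- Illustrative usage sketch for start_copy_from_url above: the call
# returns as soon as the asynchronous copy is scheduled (HTTP 202), and the
# copy id needed to poll or abort it arrives in the x-ms-copy-id response
# header. (Hypothetical `blob_ops`; the source URL and SAS are placeholders.)
resp_headers = blob_ops.start_copy_from_url(
    copy_source="https://source.blob.core.windows.net/container/blob?<sas>",
    cls=lambda pipeline_response, deserialized, headers: headers,
)
copy_id = resp_headers["x-ms-copy-id"]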
The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param copy_source_tags: Optional, default 'replace'. Indicates if source tags should be + copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and + "COPY". Default value is None. + :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_scope_info: Parameter group. Default value is None. 
+ :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword x_ms_requires_sync: This header indicates that this is a synchronous Copy Blob From + URL instead of an Asynchronous Copy Blob. Default value is "true". Note that overriding this + default value may result in unsupported behavior. + :paramtype x_ms_requires_sync: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + x_ms_requires_sync = kwargs.pop("x_ms_requires_sync", _headers.pop("x-ms-requires-sync", "true")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _lease_id = None + _encryption_scope = None + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + + request = build_copy_from_url_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + tier=tier, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + source_content_md5=source_content_md5, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + copy_source_authorization=copy_source_authorization, + encryption_scope=_encryption_scope, + copy_source_tags=copy_source_tags, + x_ms_requires_sync=x_ms_requires_sync, + version=self._config.version, + template_url=self.copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = 
pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def abort_copy_from_url( # pylint: disable=inconsistent-return-statements + self, + copy_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a + destination blob with zero length and full metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + Blob operation. Required. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "copy". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword copy_action_abort_constant: Copy action. Default value is "abort". Note that + overriding this default value may result in unsupported behavior. 
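# Illustrative usage sketch: the generated copy_from_url above is what the
# public BlobClient drives for a synchronous server-side copy (the vendored
# package exposes the same client surface). Account, container, blob names and
# SAS tokens below are placeholders, not values from this change.
from azure.storage.blob import BlobClient

dest = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/dest-blob?<sas>")
# requires_sync=True maps to the x-ms-requires-sync header handled above, so
# the call does not return until the copy is complete.
props = dest.start_copy_from_url(
    "https://acct.blob.core.windows.net/demo/src-blob?<sas>", requires_sync=True
)
print(props["copy_id"], props["copy_status"])  # e.g. '...', 'success'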
+ :paramtype copy_action_abort_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "copy")) # type: str + copy_action_abort_constant = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_abort_copy_from_url_request( + url=self._config.url, + copy_id=copy_id, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + comp=comp, + copy_action_abort_constant=copy_action_abort_constant, + version=self._config.version, + template_url=self.abort_copy_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_tier( # pylint: disable=inconsistent-return-statements + self, + tier: Union[str, "_models.AccessTierRequired"], + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + timeout: Optional[int] = None, + rehydrate_priority: Optional[Union[str, "_models.RehydratePriority"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a + premium storage account and on a block blob in a blob storage account (locally redundant + storage only). A premium page blob's tier determines the allowed size, IOPS, and bandwidth of + the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation does not + update the blob's ETag. + + :param tier: Indicates the tier to be set on the blob. 
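# Illustrative sketch: aborting a still-pending (asynchronous) copy through the
# public wrapper of abort_copy_from_url above. URLs are placeholders; the
# copy_id comes from the dict returned by start_copy_from_url.
from azure.storage.blob import BlobClient

dest = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/dest-blob?<sas>")
props = dest.start_copy_from_url("https://acct.blob.core.windows.net/demo/large-src?<sas>")
dest.abort_copy(props["copy_id"])  # leaves a zero-length destination blob with full metadata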
Known values are: "P4", "P6", "P10", + "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and "Cold". + Required. + :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param rehydrate_priority: Optional: Indicates the priority with which to rehydrate an archived + blob. Known values are: "High" and "Standard". Default value is None. + :type rehydrate_priority: str or ~azure.storage.blob.models.RehydratePriority + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "tier". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tier")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + + request = build_set_tier_request( + url=self._config.url, + tier=tier, + snapshot=snapshot, + version_id=version_id, + timeout=timeout, + rehydrate_priority=rehydrate_priority, + request_id_parameter=request_id_parameter, + lease_id=_lease_id, + if_tags=_if_tags, + comp=comp, + version=self._config.version, + template_url=self.set_tier.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if response.status_code == 202: + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tier.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
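# Illustrative sketch: set_tier above is exposed on the public BlobClient as
# set_standard_blob_tier for block blobs (URL and SAS are placeholders).
from azure.storage.blob import BlobClient, StandardBlobTier

blob = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/blob?<sas>")
blob.set_standard_blob_tier(StandardBlobTier.COOL)  # Hot/Cool/Archive; does not change the ETag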
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def query( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + query_request: Optional[_models.QueryRequest] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """The Query operation enables users to select/project on blob data by providing simple query + expressions. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
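# Illustrative sketch: get_account_info above surfaces as
# get_account_information on the public client, returning the SKU name and
# account kind parsed from the response headers (placeholder URL/SAS).
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/blob?<sas>")
info = blob.get_account_information()
print(info["sku_name"], info["account_kind"])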
Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param query_request: the query request. Default value is None. + :type query_request: ~azure.storage.blob.models.QueryRequest + :keyword comp: comp. Default value is "query". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Iterator of the response bytes or the result of cls(response) + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "query")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[Iterator[bytes]] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if query_request is not None: + _content = self._serialize.body(query_request, "QueryRequest", is_xml=True) + else: + _content = None + + request = build_query_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.query.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + "int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", 
response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-blob-type"] = self._deserialize("str", response.headers.get("x-ms-blob-type")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-committed-block-count"] = self._deserialize( + 
"int", response.headers.get("x-ms-blob-committed-block-count") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + response_headers["x-ms-blob-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-blob-content-md5") + ) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + query.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_tags( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + snapshot: Optional[str] = None, + version_id: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.BlobTags: + """The Get Tags operation enables users to get the tags associated with a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. + :type version_id: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword comp: comp. Default value is "tags". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.BlobTags] + + _if_tags = None + _lease_id = None + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_tags_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + snapshot=snapshot, + version_id=version_id, + if_tags=_if_tags, + lease_id=_lease_id, + comp=comp, + version=self._config.version, + template_url=self.get_tags.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("BlobTags", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_tags.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def set_tags( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + version_id: Optional[str] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + tags: Optional[_models.BlobTags] = None, + **kwargs: Any + ) -> None: + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param version_id: The version id parameter is an opaque DateTime value that, when present, + specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + Default value is None. 
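# Illustrative sketch pairing the Get Tags operation above with Set Tags below,
# via the public client; the tag keys and values are placeholders.
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/blob?<sas>")
blob.set_blob_tags({"project": "aosm", "stage": "published"})
print(blob.get_blob_tags())  # {'project': 'aosm', 'stage': 'published'}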
+ :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param tags: Blob tags. Default value is None. + :type tags: ~azure.storage.blob.models.BlobTags + :keyword comp: comp. Default value is "tags". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "tags")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_tags = None + _lease_id = None + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if tags is not None: + _content = self._serialize.body(tags, "BlobTags", is_xml=True) + else: + _content = None + + request = build_set_tags_request( + url=self._config.url, + timeout=timeout, + version_id=version_id, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + request_id_parameter=request_id_parameter, + if_tags=_if_tags, + lease_id=_lease_id, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_tags.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", 
response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_tags.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_block_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_block_blob_operations.py new file mode 100644 index 00000000000..5435f0a2c33 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_block_blob_operations.py @@ -0,0 +1,1748 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_upload_request( + url: str, + *, + content_length: int, + content: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + blob_content_type: Optional[str] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_cache_control: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + transactional_content_crc64: Optional[bytes] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + "blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") 
+ if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_put_blob_from_url_request( + url: str, + *, + content_length: int, + copy_source: str, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + blob_content_type: Optional[str] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_cache_control: Optional[str] = None, + metadata: 
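# Illustrative sketch: build_upload_request above backs the single-shot block
# blob upload, i.e. BlobClient.upload_blob (payload and metadata are
# placeholders).
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url("https://acct.blob.core.windows.net/demo/new-blob?<sas>")
blob.upload_blob(b"payload", overwrite=True, metadata={"origin": "example"})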
Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + source_if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + copy_source_blob_properties: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + "blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") + if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = 
_SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + if source_if_tags is not None: + _headers["x-ms-source-if-tags"] = _SERIALIZER.header("source_if_tags", source_if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if source_content_md5 is not None: + _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + if copy_source_blob_properties is not None: + _headers["x-ms-copy-source-blob-properties"] = _SERIALIZER.header( + "copy_source_blob_properties", copy_source_blob_properties, "bool" + ) + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + if copy_source_tags is not None: + _headers["x-ms-copy-source-tag-option"] = _SERIALIZER.header("copy_source_tags", copy_source_tags, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_stage_block_request( + url: str, + *, + block_id: str, + content_length: int, + content: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + 
encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["blockid"] = _SERIALIZER.query("block_id", block_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_stage_block_from_url_request( + url: str, + *, + block_id: str, + content_length: int, + source_url: str, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + lease_id: Optional[str] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: 
Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["blockid"] = _SERIALIZER.query("block_id", block_id, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str") + if source_range is not None: + _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str") + if source_content_md5 is not None: + _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray") + if source_contentcrc64 is not None: + _headers["x-ms-source-content-crc64"] = _SERIALIZER.header( + "source_contentcrc64", source_contentcrc64, "bytearray" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = _SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") 
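+    # Editor's note (added comment, not generated code): this builder only
+    # assembles the PUT request for "Put Block From URL". The service copies
+    # the requested bytes from `source_url` server-side, which is why the
+    # HttpRequest returned below carries headers and query parameters but no
+    # request body.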
+ + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_commit_block_list_request( + url: str, + *, + content: Any, + timeout: Optional[int] = None, + blob_cache_control: Optional[str] = None, + blob_content_type: Optional[str] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + "blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if metadata is not None: + _headers["x-ms-meta"] = 
_SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_block_list_request( + url: str, + *, + snapshot: Optional[str] = None, + list_type: Union[str, "_models.BlockListType"] = "committed", + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": 
_SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + _params["blocklisttype"] = _SERIALIZER.query("list_type", list_type, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class BlockBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`block_blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def upload( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + transactional_content_crc64: Optional[bytes] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Block Blob operation updates the content of an existing block blob. Updating an + existing block blob overwrites any existing metadata on the blob. Partial updates are not + supported with Put Blob; the content of the existing blob is overwritten with the content of + the new blob. To perform a partial update of the content of a block blob, use the Put Block + List operation. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "BlockBlob". Note that overriding this default value may result in + unsupported behavior. 
+ :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = body + + request = build_upload_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + transactional_content_crc64=transactional_content_crc64, + blob_type=blob_type, + content_type=content_type, + version=self._config.version, + content=_content, + 
template_url=self.upload.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def put_blob_from_url( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + copy_source: str, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + blob_tags_string: Optional[str] = None, + copy_source_blob_properties: Optional[bool] = None, + copy_source_authorization: Optional[str] = None, + copy_source_tags: Optional[Union[str, "_models.BlobCopySourceTags"]] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Put Blob from URL operation creates a new Block Blob where the contents of the blob are + read from a given URL. This API is supported beginning with the 2020-04-08 version. Partial + updates are not supported with Put Blob from URL; the content of an existing blob is + overwritten with the content of the new blob. 
To perform partial updates to a block blob’s + contents using a source URL, use the Put Block from URL API in conjunction with Put Block List. + + :param content_length: The length of the request. Required. + :type content_length: int + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param copy_source_blob_properties: Optional, default is true. Indicates if properties from + the source blob should be copied. Default value is None. + :type copy_source_blob_properties: bool + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param copy_source_tags: Optional, default 'replace'. Indicates if source tags should be + copied or replaced with the tags specified by x-ms-tags. Known values are: "REPLACE" and + "COPY". Default value is None. + :type copy_source_tags: str or ~azure.storage.blob.models.BlobCopySourceTags + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. 
+ :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "BlockBlob". Note that overriding this default value may result in + unsupported behavior. + :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "BlockBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + _source_if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_tags = source_modified_access_conditions.source_if_tags + _source_if_unmodified_since = 
source_modified_access_conditions.source_if_unmodified_since + + request = build_put_blob_from_url_request( + url=self._config.url, + content_length=content_length, + copy_source=copy_source, + timeout=timeout, + transactional_content_md5=transactional_content_md5, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + source_if_tags=_source_if_tags, + request_id_parameter=request_id_parameter, + source_content_md5=source_content_md5, + blob_tags_string=blob_tags_string, + copy_source_blob_properties=copy_source_blob_properties, + copy_source_authorization=copy_source_authorization, + copy_source_tags=copy_source_tags, + blob_type=blob_type, + version=self._config.version, + template_url=self.put_blob_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + put_blob_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + 
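+    # Editor's note (illustrative sketch, not generated code): callers normally
+    # drive the stage/commit operations below through the vendored convenience
+    # client rather than this class. Assuming `BlobBlock` is exported alongside
+    # `BlobClient` from the vendored package, and with `sas_url` and `data` as
+    # hypothetical placeholders, a block upload looks roughly like:
+    #
+    #     import base64, uuid
+    #     from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
+    #         BlobBlock, BlobClient)
+    #
+    #     blob_client = BlobClient.from_blob_url(sas_url)
+    #     block_id = base64.b64encode(uuid.uuid4().hex.encode()).decode()
+    #     blob_client.stage_block(block_id, data)               # Put Block
+    #     blob_client.commit_block_list([BlobBlock(block_id)])  # Put Block List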
@distributed_trace + def stage_block( # pylint: disable=inconsistent-return-statements + self, + block_id: str, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. Required. + :type block_id: str + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :keyword comp: comp. Default value is "block". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + _content = body + + request = build_stage_block_request( + url=self._config.url, + block_id=block_id, + content_length=content_length, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + request_id_parameter=request_id_parameter, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.stage_block.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", 
response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def stage_block_from_url( # pylint: disable=inconsistent-return-statements + self, + block_id: str, + content_length: int, + source_url: str, + source_range: Optional[str] = None, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Stage Block operation creates a new block to be committed as part of a blob where the + contents are read from a URL. + + :param block_id: A valid Base64 string value that identifies the block. Prior to encoding, the + string must be less than or equal to 64 bytes in size. For a given blob, the length of the + value specified for the blockid parameter must be the same size for each block. Required. + :type block_id: str + :param content_length: The length of the request. Required. + :type content_length: int + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "block". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "block")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_stage_block_from_url_request( + url=self._config.url, + block_id=block_id, + content_length=content_length, + source_url=source_url, + source_range=source_range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + version=self._config.version, + template_url=self.stage_block_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = 
self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + stage_block_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def commit_block_list( # pylint: disable=inconsistent-return-statements + self, + blocks: _models.BlockLookupList, + timeout: Optional[int] = None, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + metadata: Optional[Dict[str, str]] = None, + tier: Optional[Union[str, "_models.AccessTierOptional"]] = None, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Commit Block List operation writes a blob by specifying the list of block IDs that make up + the blob. In order to be written as part of a blob, a block must have been successfully written + to the server in a prior Put Block operation. You can call Put Block List to update a blob by + uploading only those blocks that have changed, then committing the new and existing blocks + together. You can do this by specifying whether to commit a block from the committed block list + or from the uncommitted block list, or to commit the most recently uploaded version of the + block, whichever list it may belong to. + + :param blocks: Blob Blocks. Required. + :type blocks: ~azure.storage.blob.models.BlockLookupList + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param tier: Optional. Indicates the tier to be set on the blob. Known values are: "P4", "P6", + "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "Hot", "Cool", "Archive", and + "Cold". Default value is None. + :type tier: str or ~azure.storage.blob.models.AccessTierOptional + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "blocklist". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_cache_control = None + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + _content = self._serialize.body(blocks, "BlockLookupList", is_xml=True) + + request = build_commit_block_list_request( + url=self._config.url, + timeout=timeout, + blob_cache_control=_blob_cache_control, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + transactional_content_md5=transactional_content_md5, + transactional_content_crc64=transactional_content_crc64, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + tier=tier, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + comp=comp, + content_type=content_type, + version=self._config.version, + 
content=_content, + template_url=self.commit_block_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + commit_block_list.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_block_list( + self, + snapshot: Optional[str] = None, + list_type: Union[str, "_models.BlockListType"] = "committed", + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.BlockList: + """The Get Block List operation retrieves the list of blocks that have been uploaded as part of a + block blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param list_type: Specifies whether to return the list of committed blocks, the list of + uncommitted blocks, or both lists together. Known values are: "committed", "uncommitted", and + "all". Default value is "committed". + :type list_type: str or ~azure.storage.blob.models.BlockListType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "blocklist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: BlockList or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlockList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blocklist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.BlockList] + + _lease_id = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_tags = modified_access_conditions.if_tags + + request = build_get_block_list_request( + url=self._config.url, + snapshot=snapshot, + list_type=list_type, + timeout=timeout, + lease_id=_lease_id, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.get_block_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("BlockList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) + + return deserialized + + get_block_list.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_container_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_container_operations.py new file mode 100644 index 00000000000..7b95b3e481e --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_container_operations.py @@ -0,0 +1,2695 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + default_encryption_scope: Optional[str] = None, + prevent_encryption_scope_override: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if access is not None: + _headers["x-ms-blob-public-access"] = _SERIALIZER.header("access", access, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", 
request_id_parameter, "str") + if default_encryption_scope is not None: + _headers["x-ms-default-encryption-scope"] = _SERIALIZER.header( + "default_encryption_scope", default_encryption_scope, "str" + ) + if prevent_encryption_scope_override is not None: + _headers["x-ms-deny-encryption-scope-override"] = _SERIALIZER.header( + "prevent_encryption_scope_override", prevent_encryption_scope_override, "bool" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_properties_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = 
_SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_metadata_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + if_modified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_access_policy_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = 
_SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_access_policy_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + content: Any = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if access is not None: + _headers["x-ms-blob-public-access"] = _SERIALIZER.header("access", access, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_restore_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_container_name: Optional[str] = None, + deleted_container_version: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = 
kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if deleted_container_name is not None: + _headers["x-ms-deleted-container-name"] = _SERIALIZER.header( + "deleted_container_name", deleted_container_name, "str" + ) + if deleted_container_version is not None: + _headers["x-ms-deleted-container-version"] = _SERIALIZER.header( + "deleted_container_version", deleted_container_version, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_rename_request( + url: str, + *, + source_container_name: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + source_lease_id: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "rename")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["x-ms-source-container-name"] = _SERIALIZER.header("source_container_name", source_container_name, "str") + if source_lease_id is not None: + _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_submit_batch_request( + url: str, + *, + content_length: int, + content: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + 
_params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if multipart_content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("multipart_content_type", multipart_content_type, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_filter_blobs_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if where is not None: + _params["where"] = _SERIALIZER.query("where", where, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", 
request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_acquire_lease_request( + url: str, + *, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if duration is not None: + _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_release_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": 
_SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_renew_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_break_lease_request( + url: str, + *, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + if_modified_since: 
Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if break_period is not None: + _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_change_lease_request( + url: str, + *, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + 
_headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_blob_flat_segment_request( + url: str, + *, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if prefix is not None: + _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_blob_hierarchy_segment_request( + url: str, + *, + delimiter: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = 
kwargs.pop("comp", _params.pop("comp", "list")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if prefix is not None: + _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") + _params["delimiter"] = _SERIALIZER.query("delimiter", delimiter, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_account_info_request(url: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class ContainerOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`container` attribute. 
+ """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + container_cpk_scope_info: Optional[_models.ContainerCpkScopeInfo] = None, + **kwargs: Any + ) -> None: + """creates a new container under the specified account. If the container with the same name + already exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. Known values are: "container" and "blob". Default value is None. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param container_cpk_scope_info: Parameter group. Default value is None. + :type container_cpk_scope_info: ~azure.storage.blob.models.ContainerCpkScopeInfo + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _default_encryption_scope = None + _prevent_encryption_scope_override = None + if container_cpk_scope_info is not None: + _default_encryption_scope = container_cpk_scope_info.default_encryption_scope + _prevent_encryption_scope_override = container_cpk_scope_info.prevent_encryption_scope_override + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + access=access, + request_id_parameter=request_id_parameter, + default_encryption_scope=_default_encryption_scope, + prevent_encryption_scope_override=_prevent_encryption_scope_override, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def get_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """returns all user-defined metadata and system properties for the specified container. The data + returned does not include the container's list of blobs. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. 
Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-blob-public-access"] = self._deserialize( + "str", response.headers.get("x-ms-blob-public-access") + ) + response_headers["x-ms-has-immutability-policy"] = self._deserialize( + "bool", response.headers.get("x-ms-has-immutability-policy") + ) + response_headers["x-ms-has-legal-hold"] = self._deserialize("bool", response.headers.get("x-ms-has-legal-hold")) + response_headers["x-ms-default-encryption-scope"] = self._deserialize( + "str", 
response.headers.get("x-ms-default-encryption-scope") + ) + response_headers["x-ms-deny-encryption-scope-override"] = self._deserialize( + "bool", response.headers.get("x-ms-deny-encryption-scope-override") + ) + response_headers["x-ms-immutable-storage-with-versioning-enabled"] = self._deserialize( + "bool", response.headers.get("x-ms-immutable-storage-with-versioning-enabled") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """operation marks the specified container for deletion. The container and any blobs contained + within it are later deleted during garbage collection. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """operation sets one or more user-defined name-value pairs for the specified container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. 
If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "metadata")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + metadata=metadata, + if_modified_since=_if_modified_since, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + 
response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def get_access_policy( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> List[_models.SignedIdentifier]: + """gets the permissions for the specified container. The permissions indicate whether container + data may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier or the result of cls(response) + :rtype: list[~azure.storage.blob.models.SignedIdentifier] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[List[_models.SignedIdentifier]] + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + 
response_headers["x-ms-blob-public-access"] = self._deserialize( + "str", response.headers.get("x-ms-blob-public-access") + ) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("[SignedIdentifier]", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_access_policy.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def set_access_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + access: Optional[Union[str, "_models.PublicAccessType"]] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + container_acl: Optional[List[_models.SignedIdentifier]] = None, + **kwargs: Any + ) -> None: + """sets the permissions for the specified container. The permissions indicate whether blobs in a + container may be accessed publicly. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param access: Specifies whether data in the container may be accessed publicly and the level + of access. Known values are: "container" and "blob". Default value is None. + :type access: str or ~azure.storage.blob.models.PublicAccessType + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param container_acl: the acls for the container. Default value is None. + :type container_acl: list[~azure.storage.blob.models.SignedIdentifier] + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "acl")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True, "itemsName": "SignedIdentifier"}} + if container_acl is not None: + _content = self._serialize.body( + container_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt + ) + else: + _content = None + + request = build_set_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + access=access, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def restore( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_container_name: Optional[str] = None, + 
deleted_container_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param deleted_container_name: Optional. Version 2019-12-12 and later. Specifies the name of + the deleted container to restore. Default value is None. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and later. Specifies the + version of the deleted container to restore. Default value is None. + :type deleted_container_version: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "undelete")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_restore_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + deleted_container_name=deleted_container_name, + deleted_container_version=deleted_container_version, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.restore.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def rename( # pylint: 
disable=inconsistent-return-statements + self, + source_container_name: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + source_lease_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """Renames an existing container. + + :param source_container_name: Required. Specifies the name of the container to rename. + Required. + :type source_container_name: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param source_lease_id: A lease ID for the source path. If specified, the source path must have + an active lease and the lease ID must match. Default value is None. + :type source_lease_id: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "rename". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "rename")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_rename_request( + url=self._config.url, + source_container_name=source_container_name, + timeout=timeout, + request_id_parameter=request_id_parameter, + source_lease_id=source_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.rename.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {"url": "{url}/{containerName}"} # type: ignore + + 
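These generated ContainerOperations methods are not normally called directly: consumers go through the vendored package's hand-written BlobClient/ContainerClient layer, and each client call ultimately funnels into an operation in this file via self._client._pipeline.run, as the method bodies above show. A minimal sketch of that calling pattern, assuming the vendored package keeps the standard azure-storage-blob public surface (the upload_vhd helper and its arguments are illustrative only, not part of this change):

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
    BlobClient, BlobType)

def upload_vhd(blob_sas_url: str, local_path: str) -> None:
    # Illustrative helper: a full SAS URL carries its own credential,
    # so no separate credential argument is needed.
    blob_client = BlobClient.from_blob_url(blob_sas_url)
    with open(local_path, "rb") as data:
        # VHD images go up as page blobs. Constructing BlobType from its
        # service value ("PageBlob") avoids relying on enum member naming,
        # which has varied across azure-storage-blob versions.
        blob_client.upload_blob(data, blob_type=BlobType("PageBlob"), overwrite=True)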
@distributed_trace + def submit_batch( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """The Batch operation allows multiple API calls to be embedded into a single HTTP request. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "batch". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Iterator of the response bytes or the result of cls(response) + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", "application/xml") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[Iterator[bytes]] + + _content = body + + request = build_submit_batch_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + multipart_content_type=multipart_content_type, + version=self._config.version, + content=_content, + template_url=self.submit_batch.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return 
deserialized + + submit_batch.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def filter_blobs( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None, + **kwargs: Any + ) -> _models.FilterBlobSegment: + """The Filter Blobs operation enables callers to list blobs in a container whose tags match a + given search expression. Filter blobs searches within the given container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param where: Filters the results to return only blobs whose tags match the + specified expression. Default value is None. + :type where: str + :param marker: A string value that identifies the portion of the list of blobs to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all blobs remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of blobs to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem] + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "blobs". Note that overriding this default value may + result in unsupported behavior.
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.FilterBlobSegment] + + request = build_filter_blobs_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + where=where, + marker=marker, + maxresults=maxresults, + include=include, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.filter_blobs.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("FilterBlobSegment", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + filter_blobs.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. 
The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + 
response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def release_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. 
Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + 
response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + proposed_lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] establishes and manages a lock on a container for delete operations. The lock duration + can be 15 to 60 seconds, or can be infinite. + + :param lease_id: Specifies the current lease ID on the resource. Required. 
+ :type lease_id: str + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Required. + :type proposed_lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "lease")) # type: str + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + action = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + if modified_access_conditions is not None: + _if_modified_since = modified_access_conditions.if_modified_since + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + proposed_lease_id=proposed_lease_id, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + request_id_parameter=request_id_parameter, + comp=comp, + restype=restype, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", 
response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def list_blob_flat_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListBlobsFlatSegmentResponse: + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsFlatSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsFlatSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListBlobsFlatSegmentResponse] + + request = build_list_blob_flat_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_blob_flat_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListBlobsFlatSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_blob_flat_segment.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def list_blob_hierarchy_segment( + self, + delimiter: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListBlobsIncludeItem"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListBlobsHierarchySegmentResponse: + """[Update] The List Blobs operation returns a list of the blobs under the specified container. + + :param delimiter: When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose names begin with + the same substring up to the appearance of the delimiter character. The delimiter may be a + single character or a string. Required. 
+ :type delimiter: str + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListBlobsIncludeItem] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "container". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. 
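+
+        Example (an illustrative sketch, not generated documentation; ``client`` is
+        an assumed instance of this operations class). With a delimiter, blobs that
+        share a name prefix up to the delimiter are folded into ``BlobPrefix``
+        entries, so one level of a virtual directory tree can be walked at a time::
+
+            segment = client.list_blob_hierarchy_segment(delimiter="/", prefix="logs/")
+            for blob_prefix in segment.segment.blob_prefixes or []:
+                print("virtual directory:", blob_prefix.name)
+            for blob in segment.segment.blob_items or []:
+                print("blob:", blob.name)
+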
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListBlobsHierarchySegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListBlobsHierarchySegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "container")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListBlobsHierarchySegmentResponse] + + request = build_list_blob_hierarchy_segment_request( + url=self._config.url, + delimiter=delimiter, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_blob_hierarchy_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListBlobsHierarchySegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_blob_hierarchy_segment.metadata = {"url": "{url}/{containerName}"} # type: ignore + + @distributed_trace + def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
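+
+        Example (an illustrative sketch, not generated documentation; ``client`` is
+        an assumed instance of this operations class). The operation returns no
+        body, so the SKU and account kind are read from the response headers via
+        the ``cls`` hook::
+
+            info = client.get_account_info(
+                cls=lambda pipeline_response, deserialized, headers: headers
+            )
+            print(info["x-ms-sku-name"], info["x-ms-account-kind"])
+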
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}/{containerName}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_page_blob_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_page_blob_operations.py new file mode 100644 index 00000000000..ad6e0b2f3c8 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_page_blob_operations.py @@ -0,0 +1,2230 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
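+# NOTE: this module follows the usual autorest layout: module-level
+# build_*_request helpers assemble HttpRequest objects (URL, query parameters
+# and headers), and the PageBlobOperations class below runs those requests
+# through the client pipeline and deserializes the responses.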
+# -------------------------------------------------------------------------- +import datetime +from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + content_length: int, + blob_content_length: int, + timeout: Optional[int] = None, + tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, + blob_content_type: Optional[str] = None, + blob_content_encoding: Optional[str] = None, + blob_content_language: Optional[str] = None, + blob_content_md5: Optional[bytes] = None, + blob_cache_control: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + blob_content_disposition: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-blob-type"] = _SERIALIZER.header("blob_type", blob_type, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("tier", tier, "str") + if blob_content_type is not None: + _headers["x-ms-blob-content-type"] = _SERIALIZER.header("blob_content_type", blob_content_type, "str") + if blob_content_encoding is not None: + _headers["x-ms-blob-content-encoding"] = _SERIALIZER.header( + 
"blob_content_encoding", blob_content_encoding, "str" + ) + if blob_content_language is not None: + _headers["x-ms-blob-content-language"] = _SERIALIZER.header( + "blob_content_language", blob_content_language, "str" + ) + if blob_content_md5 is not None: + _headers["x-ms-blob-content-md5"] = _SERIALIZER.header("blob_content_md5", blob_content_md5, "bytearray") + if blob_cache_control is not None: + _headers["x-ms-blob-cache-control"] = _SERIALIZER.header("blob_cache_control", blob_cache_control, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if blob_content_disposition is not None: + _headers["x-ms-blob-content-disposition"] = _SERIALIZER.header( + "blob_content_disposition", blob_content_disposition, "str" + ) + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-blob-content-length"] = _SERIALIZER.header("blob_content_length", blob_content_length, "int") + if blob_sequence_number is not None: + _headers["x-ms-blob-sequence-number"] = _SERIALIZER.header("blob_sequence_number", blob_sequence_number, "int") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if blob_tags_string is not None: + _headers["x-ms-tags"] = _SERIALIZER.header("blob_tags_string", blob_tags_string, "str") + if immutability_policy_expiry is not None: + _headers["x-ms-immutability-policy-until-date"] = _SERIALIZER.header( + "immutability_policy_expiry", immutability_policy_expiry, "rfc-1123" + ) + if immutability_policy_mode is not None: + _headers["x-ms-immutability-policy-mode"] = _SERIALIZER.header( + "immutability_policy_mode", immutability_policy_mode, "str" + ) + if legal_hold is not None: + _headers["x-ms-legal-hold"] = _SERIALIZER.header("legal_hold", legal_hold, "bool") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_upload_pages_request( + url: str, + *, + content_length: int, + content: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: 
Optional[int] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_sequence_number_less_than_or_equal_to: Optional[int] = None, + if_sequence_number_less_than: Optional[int] = None, + if_sequence_number_equal_to: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if transactional_content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header( + "transactional_content_md5", transactional_content_md5, "bytearray" + ) + if transactional_content_crc64 is not None: + _headers["x-ms-content-crc64"] = _SERIALIZER.header( + "transactional_content_crc64", transactional_content_crc64, "bytearray" + ) + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_sequence_number_less_than_or_equal_to is not None: + _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header( + "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int" + ) + if if_sequence_number_less_than is not None: + _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header( + "if_sequence_number_less_than", if_sequence_number_less_than, "int" + ) + if if_sequence_number_equal_to is not None: + 
_headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header( + "if_sequence_number_equal_to", if_sequence_number_equal_to, "int" + ) + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_clear_pages_request( + url: str, + *, + content_length: int, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_sequence_number_less_than_or_equal_to: Optional[int] = None, + if_sequence_number_less_than: Optional[int] = None, + if_sequence_number_equal_to: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if 
encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_sequence_number_less_than_or_equal_to is not None: + _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header( + "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int" + ) + if if_sequence_number_less_than is not None: + _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header( + "if_sequence_number_less_than", if_sequence_number_less_than, "int" + ) + if if_sequence_number_equal_to is not None: + _headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header( + "if_sequence_number_equal_to", if_sequence_number_equal_to, "int" + ) + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_upload_pages_from_url_request( + url: str, + *, + source_url: str, + source_range: str, + content_length: int, + range: str, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + lease_id: Optional[str] = None, + if_sequence_number_less_than_or_equal_to: Optional[int] = None, + if_sequence_number_less_than: Optional[int] = None, + if_sequence_number_equal_to: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + source_if_modified_since: Optional[datetime.datetime] = None, + source_if_unmodified_since: Optional[datetime.datetime] = None, + source_if_match: Optional[str] = None, + source_if_none_match: Optional[str] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", 
_headers.pop("x-ms-page-write", "update")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-page-write"] = _SERIALIZER.header("page_write", page_write, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("source_url", source_url, "str") + _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str") + if source_content_md5 is not None: + _headers["x-ms-source-content-md5"] = _SERIALIZER.header("source_content_md5", source_content_md5, "bytearray") + if source_contentcrc64 is not None: + _headers["x-ms-source-content-crc64"] = _SERIALIZER.header( + "source_contentcrc64", source_contentcrc64, "bytearray" + ) + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_sequence_number_less_than_or_equal_to is not None: + _headers["x-ms-if-sequence-number-le"] = _SERIALIZER.header( + "if_sequence_number_less_than_or_equal_to", if_sequence_number_less_than_or_equal_to, "int" + ) + if if_sequence_number_less_than is not None: + _headers["x-ms-if-sequence-number-lt"] = _SERIALIZER.header( + "if_sequence_number_less_than", if_sequence_number_less_than, "int" + ) + if if_sequence_number_equal_to is not None: + _headers["x-ms-if-sequence-number-eq"] = _SERIALIZER.header( + "if_sequence_number_equal_to", if_sequence_number_equal_to, "int" + ) + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + if source_if_modified_since is not None: + _headers["x-ms-source-if-modified-since"] = _SERIALIZER.header( + "source_if_modified_since", source_if_modified_since, "rfc-1123" + ) + if source_if_unmodified_since is not None: + _headers["x-ms-source-if-unmodified-since"] = 
_SERIALIZER.header( + "source_if_unmodified_since", source_if_unmodified_since, "rfc-1123" + ) + if source_if_match is not None: + _headers["x-ms-source-if-match"] = _SERIALIZER.header("source_if_match", source_if_match, "str") + if source_if_none_match is not None: + _headers["x-ms-source-if-none-match"] = _SERIALIZER.header("source_if_none_match", source_if_none_match, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_page_ranges_request( + url: str, + *, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + + # Construct headers + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + 
_headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_page_ranges_diff_request( + url: str, + *, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + prevsnapshot: Optional[str] = None, + prev_snapshot_url: Optional[str] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if snapshot is not None: + _params["snapshot"] = _SERIALIZER.query("snapshot", snapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if prevsnapshot is not None: + _params["prevsnapshot"] = _SERIALIZER.query("prevsnapshot", prevsnapshot, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + + # Construct headers + if prev_snapshot_url is not None: + _headers["x-ms-previous-snapshot-url"] = _SERIALIZER.header("prev_snapshot_url", prev_snapshot_url, "str") + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_resize_request( + url: str, + *, + blob_content_length: 
int, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + encryption_key: Optional[str] = None, + encryption_key_sha256: Optional[str] = None, + encryption_algorithm: Optional[Union[str, "_models.EncryptionAlgorithmType"]] = None, + encryption_scope: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if encryption_key is not None: + _headers["x-ms-encryption-key"] = _SERIALIZER.header("encryption_key", encryption_key, "str") + if encryption_key_sha256 is not None: + _headers["x-ms-encryption-key-sha256"] = _SERIALIZER.header( + "encryption_key_sha256", encryption_key_sha256, "str" + ) + if encryption_algorithm is not None: + _headers["x-ms-encryption-algorithm"] = _SERIALIZER.header("encryption_algorithm", encryption_algorithm, "str") + if encryption_scope is not None: + _headers["x-ms-encryption-scope"] = _SERIALIZER.header("encryption_scope", encryption_scope, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-blob-content-length"] = _SERIALIZER.header("blob_content_length", blob_content_length, "int") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_update_sequence_number_request( + url: str, + *, + sequence_number_action: Union[str, "_models.SequenceNumberActionType"], + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + 
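+    # (if_match above and if_none_match / if_tags below are serialized as the
+    # If-Match, If-None-Match and x-ms-if-tags conditional request headers in
+    # the builder body.)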
if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-sequence-number-action"] = _SERIALIZER.header( + "sequence_number_action", sequence_number_action, "str" + ) + if blob_sequence_number is not None: + _headers["x-ms-blob-sequence-number"] = _SERIALIZER.header("blob_sequence_number", blob_sequence_number, "int") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_copy_incremental_request( + url: str, + *, + copy_source: str, + timeout: Optional[int] = None, + if_modified_since: Optional[datetime.datetime] = None, + if_unmodified_since: Optional[datetime.datetime] = None, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, + if_tags: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "incrementalcopy")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{containerName}/{blob}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = 
_SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if if_modified_since is not None: + _headers["If-Modified-Since"] = _SERIALIZER.header("if_modified_since", if_modified_since, "rfc-1123") + if if_unmodified_since is not None: + _headers["If-Unmodified-Since"] = _SERIALIZER.header("if_unmodified_since", if_unmodified_since, "rfc-1123") + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + if if_tags is not None: + _headers["x-ms-if-tags"] = _SERIALIZER.header("if_tags", if_tags, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +class PageBlobOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`page_blob` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + blob_content_length: int, + timeout: Optional[int] = None, + tier: Optional[Union[str, "_models.PremiumPageBlobAccessTier"]] = None, + metadata: Optional[Dict[str, str]] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + blob_tags_string: Optional[str] = None, + immutability_policy_expiry: Optional[datetime.datetime] = None, + immutability_policy_mode: Optional[Union[str, "_models.BlobImmutabilityPolicyMode"]] = None, + legal_hold: Optional[bool] = None, + blob_http_headers: Optional[_models.BlobHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Create operation creates a new page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. Required. + :type blob_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param tier: Optional. Indicates the tier to be set on the page blob. Known values are: "P4", + "P6", "P10", "P15", "P20", "P30", "P40", "P50", "P60", "P70", and "P80". Default value is None. 
+ :type tier: str or ~azure.storage.blob.models.PremiumPageBlobAccessTier + :param metadata: Optional. Specifies a user-defined name-value pair associated with the blob. + If no name-value pairs are specified, the operation will copy the metadata from the source blob + or file to the destination blob. If one or more name-value pairs are specified, the destination + blob is created with the specified metadata, and metadata is not copied from the source blob or + file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming + rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more + information. Default value is None. + :type metadata: dict[str, str] + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. Default value is 0. + :type blob_sequence_number: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param blob_tags_string: Optional. Used to set blob tags in various blob operations. Default + value is None. + :type blob_tags_string: str + :param immutability_policy_expiry: Specifies the date time when the blobs immutability policy + is set to expire. Default value is None. + :type immutability_policy_expiry: ~datetime.datetime + :param immutability_policy_mode: Specifies the immutability policy mode to set on the blob. + Known values are: "Mutable", "Unlocked", and "Locked". Default value is None. + :type immutability_policy_mode: str or ~azure.storage.blob.models.BlobImmutabilityPolicyMode + :param legal_hold: Specified if a legal hold should be set on the blob. Default value is None. + :type legal_hold: bool + :param blob_http_headers: Parameter group. Default value is None. + :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword blob_type: Specifies the type of blob to create: block blob, page blob, or append + blob. Default value is "PageBlob". Note that overriding this default value may result in + unsupported behavior. 
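+
+        Example (an illustrative sketch, not generated documentation; ``client`` is
+        an assumed instance of this operations class). A Create request carries no
+        body, so ``content_length`` is 0, and the blob size must be a multiple of
+        512 bytes::
+
+            client.create(
+                content_length=0,
+                blob_content_length=4 * 1024 * 1024,  # 4 MiB, 512-byte aligned
+            )
+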
+ :paramtype blob_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + blob_type = kwargs.pop("blob_type", _headers.pop("x-ms-blob-type", "PageBlob")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _blob_content_type = None + _blob_content_encoding = None + _blob_content_language = None + _blob_content_md5 = None + _blob_cache_control = None + _lease_id = None + _blob_content_disposition = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if blob_http_headers is not None: + _blob_cache_control = blob_http_headers.blob_cache_control + _blob_content_disposition = blob_http_headers.blob_content_disposition + _blob_content_encoding = blob_http_headers.blob_content_encoding + _blob_content_language = blob_http_headers.blob_content_language + _blob_content_md5 = blob_http_headers.blob_content_md5 + _blob_content_type = blob_http_headers.blob_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_create_request( + url=self._config.url, + content_length=content_length, + blob_content_length=blob_content_length, + timeout=timeout, + tier=tier, + blob_content_type=_blob_content_type, + blob_content_encoding=_blob_content_encoding, + blob_content_language=_blob_content_language, + blob_content_md5=_blob_content_md5, + blob_cache_control=_blob_cache_control, + metadata=metadata, + lease_id=_lease_id, + blob_content_disposition=_blob_content_disposition, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_sequence_number=blob_sequence_number, + request_id_parameter=request_id_parameter, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + blob_type=blob_type, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response 
= self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["x-ms-version-id"] = self._deserialize("str", response.headers.get("x-ms-version-id")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def upload_pages( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + body: IO, + transactional_content_md5: Optional[bytes] = None, + transactional_content_crc64: Optional[bytes] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param transactional_content_md5: Specify the transactional md5 for the body, to be validated + by the service. Default value is None. + :type transactional_content_md5: bytes + :param transactional_content_crc64: Specify the transactional crc64 for the body, to be + validated by the service. Default value is None. + :type transactional_content_crc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. 
+ :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "update". Note that + overriding this default value may result in unsupported behavior. + :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = 
sequence_number_access_conditions.if_sequence_number_less_than
+            _if_sequence_number_less_than_or_equal_to = (
+                sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to
+            )
+        if modified_access_conditions is not None:
+            _if_match = modified_access_conditions.if_match
+            _if_modified_since = modified_access_conditions.if_modified_since
+            _if_none_match = modified_access_conditions.if_none_match
+            _if_tags = modified_access_conditions.if_tags
+            _if_unmodified_since = modified_access_conditions.if_unmodified_since
+        _content = body
+
+        request = build_upload_pages_request(
+            url=self._config.url,
+            content_length=content_length,
+            transactional_content_md5=transactional_content_md5,
+            transactional_content_crc64=transactional_content_crc64,
+            timeout=timeout,
+            range=range,
+            lease_id=_lease_id,
+            encryption_key=_encryption_key,
+            encryption_key_sha256=_encryption_key_sha256,
+            encryption_algorithm=_encryption_algorithm,
+            encryption_scope=_encryption_scope,
+            if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to,
+            if_sequence_number_less_than=_if_sequence_number_less_than,
+            if_sequence_number_equal_to=_if_sequence_number_equal_to,
+            if_modified_since=_if_modified_since,
+            if_unmodified_since=_if_unmodified_since,
+            if_match=_if_match,
+            if_none_match=_if_none_match,
+            if_tags=_if_tags,
+            request_id_parameter=request_id_parameter,
+            comp=comp,
+            page_write=page_write,
+            content_type=content_type,
+            version=self._config.version,
+            content=_content,
+            template_url=self.upload_pages.metadata["url"],
+            headers=_headers,
+            params=_params,
+        )
+        request = _convert_request(request)
+        request.url = self._client.format_url(request.url)  # type: ignore
+
+        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            request, stream=False, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [201]:
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)
+            raise HttpResponseError(response=response, model=error)
+
+        response_headers = {}
+        response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag"))
+        response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified"))
+        response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5"))
+        response_headers["x-ms-content-crc64"] = self._deserialize(
+            "bytearray", response.headers.get("x-ms-content-crc64")
+        )
+        response_headers["x-ms-blob-sequence-number"] = self._deserialize(
+            "int", response.headers.get("x-ms-blob-sequence-number")
+        )
+        response_headers["x-ms-client-request-id"] = self._deserialize(
+            "str", response.headers.get("x-ms-client-request-id")
+        )
+        response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
+        response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version"))
+        response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date"))
+        response_headers["x-ms-request-server-encrypted"] = self._deserialize(
+            "bool", response.headers.get("x-ms-request-server-encrypted")
+        )
+        response_headers["x-ms-encryption-key-sha256"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-key-sha256")
+        )
+        response_headers["x-ms-encryption-scope"] = self._deserialize(
+            "str", response.headers.get("x-ms-encryption-scope")
+        )
+
+        if cls:
+            return cls(pipeline_response, None, response_headers)
+
+    upload_pages.metadata = {"url": "{url}/{containerName}/{blob}"}  # type: ignore
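[Editor's note] The generated `upload_pages` operation above is the raw wire-level call. The keyword names in the following sketch come straight from its signature, but everything else (the `page_blob` instance, the helper, its wiring) is a hypothetical illustration of how such an operations object is driven once it has a configured pipeline and blob URL:

    import io

    PAGE = 512  # page-range offsets and lengths must be 512-byte aligned

    def write_pages(page_blob, offset: int, data: bytes) -> None:
        # `page_blob` is assumed to be an instance of the generated page-blob
        # operations class above; upload_pages sends the bytes as the request
        # body, with an HTTP Range header covering exactly those bytes.
        assert offset % PAGE == 0 and len(data) % PAGE == 0
        page_blob.upload_pages(
            content_length=len(data),
            body=io.BytesIO(data),
            range=f"bytes={offset}-{offset + len(data) - 1}",
        )

The `bytes=start-end` form is the standard inclusive HTTP Range syntax; on success the service answers 201, and any other status raises `HttpResponseError` through the error path shown above.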
response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def clear_pages( # pylint: disable=inconsistent-return-statements + self, + content_length: int, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Clear Pages operation clears a set of pages from a page blob. + + :param content_length: The length of the request. Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "clear". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "clear")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_less_than_or_equal_to = ( + sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + ) + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_clear_pages_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + range=range, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, + if_sequence_number_less_than=_if_sequence_number_less_than, + if_sequence_number_equal_to=_if_sequence_number_equal_to, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + page_write=page_write, + version=self._config.version, + template_url=self.clear_pages.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + clear_pages.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def upload_pages_from_url( # pylint: disable=inconsistent-return-statements + self, + source_url: str, + source_range: str, + content_length: int, + range: str, + source_content_md5: Optional[bytes] = None, + source_contentcrc64: Optional[bytes] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + copy_source_authorization: Optional[str] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + sequence_number_access_conditions: Optional[_models.SequenceNumberAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Upload Pages operation writes a range of pages to a page blob where the contents are read + from a URL. + + :param source_url: Specify a URL to the copy source. Required. + :type source_url: str + :param source_range: Bytes of source data in the specified range. The length of this range + should match the ContentLength header and x-ms-range/Range destination range header. Required. + :type source_range: str + :param content_length: The length of the request. Required. + :type content_length: int + :param range: The range of bytes to which the source range would be written. The range should + be 512 aligned and range-end is required. Required. + :type range: str + :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read + from the copy source. Default value is None. + :type source_content_md5: bytes + :param source_contentcrc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_contentcrc64: bytes + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param sequence_number_access_conditions: Parameter group. Default value is None. + :type sequence_number_access_conditions: + ~azure.storage.blob.models.SequenceNumberAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.blob.models.SourceModifiedAccessConditions + :keyword comp: comp. Default value is "page". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword page_write: Required. You may specify one of the following options: + + + * Update: Writes the bytes specified by the request body into the specified range. The Range + and Content-Length headers must match to perform the update. + * Clear: Clears the specified range and releases the space used in storage for that range. To + clear a range, set the Content-Length header to zero, and the Range header to a value that + indicates the range to clear, up to maximum blob size. Default value is "update". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype page_write: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "page")) # type: str + page_write = kwargs.pop("page_write", _headers.pop("x-ms-page-write", "update")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _lease_id = None + _if_sequence_number_less_than_or_equal_to = None + _if_sequence_number_less_than = None + _if_sequence_number_equal_to = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + _source_if_modified_since = None + _source_if_unmodified_since = None + _source_if_match = None + _source_if_none_match = None + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if sequence_number_access_conditions is not None: + _if_sequence_number_equal_to = sequence_number_access_conditions.if_sequence_number_equal_to + _if_sequence_number_less_than = sequence_number_access_conditions.if_sequence_number_less_than + _if_sequence_number_less_than_or_equal_to = ( + sequence_number_access_conditions.if_sequence_number_less_than_or_equal_to + ) + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + if source_modified_access_conditions is not None: + _source_if_match = source_modified_access_conditions.source_if_match + _source_if_modified_since = source_modified_access_conditions.source_if_modified_since + _source_if_none_match = source_modified_access_conditions.source_if_none_match + _source_if_unmodified_since = source_modified_access_conditions.source_if_unmodified_since + + request = build_upload_pages_from_url_request( + url=self._config.url, + source_url=source_url, + source_range=source_range, + content_length=content_length, + range=range, + source_content_md5=source_content_md5, + source_contentcrc64=source_contentcrc64, + timeout=timeout, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + lease_id=_lease_id, + if_sequence_number_less_than_or_equal_to=_if_sequence_number_less_than_or_equal_to, + if_sequence_number_less_than=_if_sequence_number_less_than, + if_sequence_number_equal_to=_if_sequence_number_equal_to, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + 
if_tags=_if_tags, + source_if_modified_since=_source_if_modified_since, + source_if_unmodified_since=_source_if_unmodified_since, + source_if_match=_source_if_match, + source_if_none_match=_source_if_none_match, + request_id_parameter=request_id_parameter, + copy_source_authorization=copy_source_authorization, + comp=comp, + page_write=page_write, + version=self._config.version, + template_url=self.upload_pages_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-encryption-key-sha256"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-key-sha256") + ) + response_headers["x-ms-encryption-scope"] = self._deserialize( + "str", response.headers.get("x-ms-encryption-scope") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_pages_from_url.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_page_ranges( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.PageList: + """The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot + of a page blob. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. 
+ :type timeout: int + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.PageList] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_page_ranges_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + range=range, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + marker=marker, + maxresults=maxresults, + comp=comp, + version=self._config.version, + template_url=self.get_page_ranges.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("PageList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_page_ranges.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def get_page_ranges_diff( + self, + snapshot: Optional[str] = None, + timeout: Optional[int] = None, + prevsnapshot: Optional[str] = None, + 
prev_snapshot_url: Optional[str] = None, + range: Optional[str] = None, + request_id_parameter: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> _models.PageList: + """The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that + were changed between target blob and previous snapshot. + + :param snapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the blob snapshot to retrieve. For more information on working with blob snapshots, + see :code:`Creating + a Snapshot of a Blob.`. Default value is None. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param prevsnapshot: Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a + DateTime value that specifies that the response will contain only pages that were changed + between target blob and previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot specified by prevsnapshot is + the older of the two. Note that incremental snapshots are currently supported only for blobs + created on or after January 1, 2016. Default value is None. + :type prevsnapshot: str + :param prev_snapshot_url: Optional. This header is only supported in service versions + 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The + response will only contain pages that were changed between the target blob and its previous + snapshot. Default value is None. + :type prev_snapshot_url: str + :param range: Return only the bytes of the blob in the specified range. Default value is None. + :type range: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "pagelist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: PageList or the result of cls(response) + :rtype: ~azure.storage.blob.models.PageList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "pagelist")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.PageList] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_get_page_ranges_diff_request( + url=self._config.url, + snapshot=snapshot, + timeout=timeout, + prevsnapshot=prevsnapshot, + prev_snapshot_url=prev_snapshot_url, + range=range, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + marker=marker, + maxresults=maxresults, + comp=comp, + version=self._config.version, + template_url=self.get_page_ranges_diff.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-blob-content-length"] = self._deserialize( + "int", response.headers.get("x-ms-blob-content-length") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + + deserialized = self._deserialize("PageList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_page_ranges_diff.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def resize( # pylint: disable=inconsistent-return-statements + self, + blob_content_length: int, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + cpk_info: Optional[_models.CpkInfo] = None, + cpk_scope_info: Optional[_models.CpkScopeInfo] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """Resize the Blob. + + :param blob_content_length: This header specifies the maximum size for the page blob, up to 1 + TB. The page blob size must be aligned to a 512-byte boundary. Required. + :type blob_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Parameter group. Default value is None. + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param cpk_scope_info: Parameter group. Default value is None. + :type cpk_scope_info: ~azure.storage.blob.models.CpkScopeInfo + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _encryption_key = None + _encryption_key_sha256 = None + _encryption_algorithm = None + _encryption_scope = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if cpk_info is not None: + _encryption_algorithm = cpk_info.encryption_algorithm + _encryption_key = cpk_info.encryption_key + _encryption_key_sha256 = cpk_info.encryption_key_sha256 + if cpk_scope_info is not None: + _encryption_scope = cpk_scope_info.encryption_scope + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_resize_request( + url=self._config.url, + blob_content_length=blob_content_length, + timeout=timeout, + lease_id=_lease_id, + encryption_key=_encryption_key, + encryption_key_sha256=_encryption_key_sha256, + encryption_algorithm=_encryption_algorithm, + encryption_scope=_encryption_scope, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.resize.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + resize.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def update_sequence_number( # pylint: disable=inconsistent-return-statements + self, + sequence_number_action: Union[str, "_models.SequenceNumberActionType"], + timeout: Optional[int] = None, + blob_sequence_number: int = 0, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + **kwargs: Any + ) -> None: + """Update the sequence number of the blob. + + :param sequence_number_action: Required if the x-ms-blob-sequence-number header is set for the + request. This property applies to page blobs only. This property indicates how the service + should modify the blob's sequence number. Known values are: "max", "update", and "increment". + Required. + :type sequence_number_action: str or ~azure.storage.blob.models.SequenceNumberActionType + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param blob_sequence_number: Set for page blobs only. The sequence number is a user-controlled + value that you can use to track requests. The value of the sequence number must be between 0 + and 2^63 - 1. Default value is 0. + :type blob_sequence_number: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _lease_id = None + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_update_sequence_number_request( + url=self._config.url, + sequence_number_action=sequence_number_action, + timeout=timeout, + lease_id=_lease_id, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + blob_sequence_number=blob_sequence_number, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.update_sequence_number.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-blob-sequence-number"] = self._deserialize( + "int", response.headers.get("x-ms-blob-sequence-number") + ) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + update_sequence_number.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore + + @distributed_trace + def copy_incremental( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + modified_access_conditions: Optional[_models.ModifiedAccessConditions] = None, + 
**kwargs: Any + ) -> None: + """The Copy Incremental operation copies a snapshot of the source page blob to a destination page + blob. The snapshot is copied such that only the differential changes between the previously + copied snapshot are transferred to the destination. The copied snapshots are complete copies of + the original snapshot and can be read or copied from as usual. This API is supported since REST + version 2016-05-31. + + :param copy_source: Specifies the name of the source page blob snapshot. This value is a URL of + up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it + would appear in a request URI. The source blob must either be public or must be authenticated + via a shared access signature. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param modified_access_conditions: Parameter group. Default value is None. + :type modified_access_conditions: ~azure.storage.blob.models.ModifiedAccessConditions + :keyword comp: comp. Default value is "incrementalcopy". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "incrementalcopy")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _if_modified_since = None + _if_unmodified_since = None + _if_match = None + _if_none_match = None + _if_tags = None + if modified_access_conditions is not None: + _if_match = modified_access_conditions.if_match + _if_modified_since = modified_access_conditions.if_modified_since + _if_none_match = modified_access_conditions.if_none_match + _if_tags = modified_access_conditions.if_tags + _if_unmodified_since = modified_access_conditions.if_unmodified_since + + request = build_copy_incremental_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + if_modified_since=_if_modified_since, + if_unmodified_since=_if_unmodified_since, + if_match=_if_match, + if_none_match=_if_none_match, + if_tags=_if_tags, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.copy_incremental.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = 
self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + copy_incremental.metadata = {"url": "{url}/{containerName}/{blob}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_patch.py new file mode 100644 index 00000000000..029b47fe478 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_patch.py @@ -0,0 +1,23 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import List +__all__ = [] # type: List[str] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_service_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_service_operations.py new file mode 100644 index 00000000000..75e0f8a8f2f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_generated/operations/_service_operations.py @@ -0,0 +1,1056 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
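+# +# Usage sketch (illustrative only, not generated content): this module follows the usual autorest layout; the module-level build_*_request helpers construct HttpRequest objects, which the ServiceOperations methods then run through the client pipeline, e.g. (assuming a pipeline configured elsewhere and a hypothetical account URL): +# +#     request = build_get_properties_request(url="https://myaccount.blob.core.windows.net") +#     response = pipeline.run(request).http_response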
+# -------------------------------------------------------------------------- +from typing import Any, Callable, Dict, IO, Iterator, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_set_properties_request( + url: str, *, content: Any, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_properties_request( + url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") 
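+ # restype=service and comp=properties scope this GET to the account-level Blob service properties resource.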
+ _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_statistics_request( + url: str, *, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "stats")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_containers_segment_request( + url: str, + *, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if prefix is not None: + _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", 
timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_user_delegation_key_request( + url: str, *, content: Any, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "userdelegationkey")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_account_info_request(url: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_submit_batch_request( + url: str, + *, + content_length: int, + content: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", None) + ) # type: Optional[str] + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if multipart_content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("multipart_content_type", multipart_content_type, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_filter_blobs_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + version = kwargs.pop("version", _headers.pop("x-ms-version", "2021-12-02")) # type: str + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url = _format_url_section(_url, **path_format_arguments) + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if where is not None: + _params["where"] = _SERIALIZER.query("where", where, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class ServiceOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. 
+ + Instead, you should access the following operations through + :class:`~azure.storage.blob.AzureBlobStorage`'s + :attr:`service` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def set_properties( # pylint: disable=inconsistent-return-statements + self, + storage_service_properties: _models.StorageServiceProperties, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets properties for a storage account's Blob service endpoint, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. Required. + :type storage_service_properties: ~azure.storage.blob.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def get_properties( + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> _models.StorageServiceProperties: + """gets the properties of a storage account's Blob service, including properties for Storage + Analytics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.StorageServiceProperties] + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("StorageServiceProperties", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_properties.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def get_statistics( + self, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> _models.StorageServiceStats: + """Retrieves statistics related to replication for the Blob service. It is only available on the + secondary location endpoint when read-access geo-redundant replication is enabled for the + storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "stats". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceStats or the result of cls(response) + :rtype: ~azure.storage.blob.models.StorageServiceStats + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "stats")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.StorageServiceStats] + + request = build_get_statistics_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_statistics.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("StorageServiceStats", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_statistics.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def list_containers_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.ListContainersIncludeType"]]] = None, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.ListContainersSegmentResponse: + """The List Containers Segment operation returns a list of the containers under the specified + account. + + :param prefix: Filters the results to return only containers whose name begins with the + specified prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. 
+ :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify that the container's metadata be returned as + part of the response body. Default value is None. + :type include: list[str or ~azure.storage.blob.models.ListContainersIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListContainersSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.blob.models.ListContainersSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "list")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.ListContainersSegmentResponse] + + request = build_list_containers_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + version=self._config.version, + template_url=self.list_containers_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("ListContainersSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, 
response_headers) + + return deserialized + + list_containers_segment.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def get_user_delegation_key( + self, + key_info: _models.KeyInfo, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> _models.UserDelegationKey: + """Retrieves a user delegation key for the Blob service. This is only a valid operation when using + bearer token authentication. + + :param key_info: Key information. Required. + :type key_info: ~azure.storage.blob.models.KeyInfo + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "userdelegationkey". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: UserDelegationKey or the result of cls(response) + :rtype: ~azure.storage.blob.models.UserDelegationKey + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "service")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "userdelegationkey")) # type: str + content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.UserDelegationKey] + + _content = self._serialize.body(key_info, "KeyInfo", is_xml=True) + + request = build_get_user_delegation_key_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.get_user_delegation_key.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", 
response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("UserDelegationKey", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_user_delegation_key.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Returns the sku name and account kind. + + :keyword restype: restype. Default value is "account". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype = kwargs.pop("restype", _params.pop("restype", "account")) # type: str + comp = kwargs.pop("comp", _params.pop("comp", "properties")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[None] + + request = build_get_account_info_request( + url=self._config.url, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_account_info.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-sku-name"] = self._deserialize("str", response.headers.get("x-ms-sku-name")) + response_headers["x-ms-account-kind"] = self._deserialize("str", response.headers.get("x-ms-account-kind")) + response_headers["x-ms-is-hns-enabled"] = self._deserialize("bool", response.headers.get("x-ms-is-hns-enabled")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_account_info.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def submit_batch( + self, + content_length: int, + body: IO, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """The Batch operation allows multiple API calls to be embedded into a single HTTP 
request. + + :param content_length: The length of the request. Required. + :type content_length: int + :param body: Initial data. Required. + :type body: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "batch". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Iterator of the response bytes or the result of cls(response) + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "batch")) # type: str + multipart_content_type = kwargs.pop( + "multipart_content_type", _headers.pop("Content-Type", "application/xml") + ) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[Iterator[bytes]] + + _content = body + + request = build_submit_batch_request( + url=self._config.url, + content_length=content_length, + timeout=timeout, + request_id_parameter=request_id_parameter, + comp=comp, + multipart_content_type=multipart_content_type, + version=self._config.version, + content=_content, + template_url=self.submit_batch.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + submit_batch.metadata = {"url": "{url}"} # type: ignore + + @distributed_trace + def filter_blobs( + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + where: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, "_models.FilterBlobsIncludeItem"]]] = None, + **kwargs: Any + ) -> _models.FilterBlobSegment: + """The Filter Blobs operation enables callers to list blobs across all containers whose tags match + a given search 
expression. Filter blobs searches across all containers within a storage + account but can be scoped within the expression to a single container. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for Blob Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param where: Filters the results to return only blobs whose tags match the + specified expression. Default value is None. + :type where: str + :param marker: A string value that identifies the portion of the list of containers to be + returned with the next listing operation. The operation returns the NextMarker value within the + response body if the listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value for the marker parameter + in a subsequent call to request the next page of list items. The marker value is opaque to the + client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of containers to return. If the request does + not specify maxresults, or specifies a value greater than 5000, the server will return up to + 5000 items. Note that if the listing operation crosses a partition boundary, then the service + will return a continuation token for retrieving the remainder of the results. For this reason, + it is possible that the service will return fewer results than specified by maxresults, or than + the default of 5000. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.blob.models.FilterBlobsIncludeItem] + :keyword comp: comp. Default value is "blobs". Note that overriding this default value may + result in unsupported behavior.
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp = kwargs.pop("comp", _params.pop("comp", "blobs")) # type: str + cls = kwargs.pop("cls", None) # type: ClsType[_models.FilterBlobSegment] + + request = build_filter_blobs_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + where=where, + marker=marker, + maxresults=maxresults, + include=include, + comp=comp, + version=self._config.version, + template_url=self.filter_blobs.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) # type: ignore + + pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("FilterBlobSegment", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + filter_blobs.metadata = {"url": "{url}"} # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_lease.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_lease.py new file mode 100644 index 00000000000..dbeb335d0a9 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_lease.py @@ -0,0 +1,351 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
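+# +# Usage sketch (illustrative only; `blob_client` is an assumed, already-constructed BlobClient): the BlobLeaseClient defined below is a context manager whose __exit__ releases the lease: +# +#     with BlobLeaseClient(blob_client) as lease: +#         lease.acquire(lease_duration=15)  # fixed 15-second lease +#         ...  # operate on the blob while the lease is held +#     # lease.release() runs automatically on exit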
+# -------------------------------------------------------------------------- + +import uuid + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace + +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._serialize import get_modify_conditions + +if TYPE_CHECKING: + from datetime import datetime + + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(object): # pylint: disable=client-accepts-api-version-keyword + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.BlobClient or + ~azure.storage.blob.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + def __init__( + self, client, lease_id=None + ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs + # type: (Union[BlobClient, ContainerClient], Optional[str]) -> None + self.id = lease_id or str(uuid.uuid4()) + self.last_modified = None + self.etag = None + if hasattr(client, 'blob_name'): + self._client = client._client.blob # type: ignore # pylint: disable=protected-access + elif hasattr(client, 'container_name'): + self._client = client._client.container # type: ignore # pylint: disable=protected-access + else: + raise TypeError("Lease must use either BlobClient or ContainerClient.") + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @distributed_trace + def acquire(self, lease_duration=-1, **kwargs): + # type: (int, **Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. 
+ Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace + def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_.
+ :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + e.g. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace + def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response = self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag') # type: str
+        self.id = response.get('lease_id') # type: str
+        self.last_modified = response.get('last_modified') # type: datetime
+
+    @distributed_trace
+    def break_lease(self, lease_break_period=None, **kwargs):
+        # type: (Optional[int], Any) -> int
+        """Break the lease, if the container or blob has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the container or blob.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration of seconds that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response = self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                break_period=lease_break_period,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time') # type: ignore
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_list_blobs_helper.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_list_blobs_helper.py
new file mode 100644
index 00000000000..6bbaa71c404
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_list_blobs_helper.py
@@ -0,0 +1,334 @@
+# pylint: disable=too-many-lines
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from urllib.parse import unquote
+
+from azure.core.paging import PageIterator, ItemPaged
+from azure.core.exceptions import HttpResponseError
+
+from ._deserialize import (
+    get_blob_properties_from_generated_code,
+    load_many_xml_nodes,
+    load_xml_int,
+    load_xml_string,
+    parse_tags,
+)
+from ._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix, FilterBlobItem
+from ._generated._serialization import Deserializer
+from ._models import BlobProperties, FilteredBlob
+from ._shared.models import DictMixin
+from ._shared.response_handlers import (
+    return_context_and_deserialized,
+    return_raw_deserialized,
+    process_storage_error,
+)
+
+
+class IgnoreListBlobsDeserializer(Deserializer):
+    def __call__(self, target_obj, response_data, content_type=None):
+        if target_obj == "ListBlobsFlatSegmentResponse":
+            return None
+        # Delegate (and return) everything else; without the return the result would be lost.
+        return super().__call__(target_obj, response_data, content_type)
+
+
+class BlobPropertiesPaged(PageIterator):
+    """An Iterable of Blob properties.
+
+    :ivar str service_endpoint: The service URL.
+    :ivar str prefix: A blob name prefix being used to filter the list.
+    :ivar str marker: The continuation token of the current page of results.
+    :ivar int results_per_page: The maximum number of results retrieved per API call.
+    :ivar str continuation_token: The continuation token to retrieve the next page of results.
+    :ivar str location_mode: The location mode being used to list results.
The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobNamesPaged(PageIterator): + """An Iterable of Blob names. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(str) + :ivar str container: The container that the blobs are listed from. 
+ :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(BlobNamesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_raw_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.get('ServiceEndpoint') + self.prefix = load_xml_string(self._response, 'Prefix') + self.marker = load_xml_string(self._response, 'Marker') + self.results_per_page = load_xml_int(self._response, 'MaxResults') + self.container = self._response.get('ContainerName') + + blobs = load_many_xml_nodes(self._response, 'Blob', wrapper='Blobs') + self.current_page = [load_xml_string(blob, 'Name') for blob in blobs] + + next_marker = load_xml_string(self._response, 'NextMarker') + return next_marker or None, self.current_page + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + def _extract_data_cb(self, get_next_return): + continuation_token, _ = super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + if item.name.encoded: + name = unquote(item.name.content) + else: + name = item.name.content + return BlobPrefix( + self._command, + container=self.container, + prefix=name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item + + +class BlobPrefix(ItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. 
+ :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str next_marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. + """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class FilteredBlobPaged(PageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.FilteredBlob) + :ivar str container: The container that the blobs are listed from. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
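+
+    Example (an illustrative sketch only: this pager is normally constructed for you
+    by ``find_blobs_by_tags``, and ``container_client`` below is an assumed,
+    pre-existing ``ContainerClient``)::
+
+        for blob in container_client.find_blobs_by_tags("\"tagname\"='my tag'"):
+            print(blob.name, blob.tags)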
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_models.py new file mode 100644 index 00000000000..7d5c14e62b0 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_models.py @@ -0,0 +1,1331 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from enum import Enum + +from azure.core import CaseInsensitiveEnumMeta +from azure.core.paging import PageIterator +from azure.core.exceptions import HttpResponseError + +from ._shared import decode_base64_to_bytes +from ._shared.response_handlers import return_context_and_deserialized, process_storage_error +from ._shared.models import DictMixin, get_enum_value +from ._generated.models import ArrowField +from ._generated.models import Logging as GeneratedLogging +from ._generated.models import Metrics as GeneratedMetrics +from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy +from ._generated.models import StaticWebsite as GeneratedStaticWebsite +from ._generated.models import CorsRule as GeneratedCorsRule +from ._generated.models import AccessPolicy as GenAccessPolicy + + +def parse_page_list(page_list): + """Parse a generated PageList into a single list of PageRange sorted by start. 
+ """ + page_ranges = page_list.page_range + clear_ranges = page_list.clear_range + + ranges = [] + p_i, c_i = 0, 0 + + # Combine page ranges and clear ranges into single list, sorted by start + while p_i < len(page_ranges) and c_i < len(clear_ranges): + p, c = page_ranges[p_i], clear_ranges[c_i] + + if p.start < c.start: + ranges.append( + PageRange(p.start, p.end, cleared=False) + ) + p_i += 1 + else: + ranges.append( + PageRange(c.start, c.end, cleared=True) + ) + c_i += 1 + + # Grab remaining elements in either list + ranges += [PageRange(r.start, r.end, cleared=False) for r in page_ranges[p_i:]] + ranges += [PageRange(r.start, r.end, cleared=True) for r in clear_ranges[c_i:]] + + return ranges + + +class BlobType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + + BLOCKBLOB = "BlockBlob" + PAGEBLOB = "PageBlob" + APPENDBLOB = "AppendBlob" + + +class BlockState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Block blob block types.""" + + COMMITTED = 'Committed' #: Committed blocks. + LATEST = 'Latest' #: Latest blocks. + UNCOMMITTED = 'Uncommitted' #: Uncommitted blocks. + + +class StandardBlobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ + Specifies the blob tier to set the blob to. This is only applicable for + block blobs on standard storage accounts. + """ + + ARCHIVE = 'Archive' #: Archive + COOL = 'Cool' #: Cool + COLD = 'Cold' #: Cold + HOT = 'Hot' #: Hot + + +class PremiumPageBlobTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ + Specifies the page blob tier to set the blob to. This is only applicable to page + blobs on premium storage accounts. Please take a look at: + https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets + for detailed information on the corresponding IOPS and throughput per PageBlobTier. + """ + + P4 = 'P4' #: P4 Tier + P6 = 'P6' #: P6 Tier + P10 = 'P10' #: P10 Tier + P20 = 'P20' #: P20 Tier + P30 = 'P30' #: P30 Tier + P40 = 'P40' #: P40 Tier + P50 = 'P50' #: P50 Tier + P60 = 'P60' #: P60 Tier + + +class QuickQueryDialect(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the quick query input/output dialect.""" + + DELIMITEDTEXT = 'DelimitedTextDialect' + DELIMITEDJSON = 'DelimitedJsonDialect' + PARQUET = 'ParquetDialect' + + +class SequenceNumberAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Sequence number actions.""" + + INCREMENT = 'increment' + """ + Increments the value of the sequence number by 1. If specifying this option, + do not include the x-ms-blob-sequence-number header. + """ + + MAX = 'max' + """ + Sets the sequence number to be the higher of the value included with the + request and the value currently stored for the blob. + """ + + UPDATE = 'update' + """Sets the sequence number to the value included with the request.""" + + +class PublicAccess(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ + Specifies whether data in the container may be accessed publicly and the level of access. + """ + + OFF = 'off' + """ + Specifies that there is no public read access for both the container and blobs within the container. + Clients cannot enumerate the containers within the storage account as well as the blobs within the container. + """ + + BLOB = 'blob' + """ + Specifies public read access for blobs. Blob data within this container can be read + via anonymous request, but container data is not available. Clients cannot enumerate + blobs within the container via anonymous request. 
+ """ + + CONTAINER = 'container' + """ + Specifies full public read access for container and blob data. Clients can enumerate + blobs within the container via anonymous request, but cannot enumerate containers + within the storage account. + """ + + +class BlobImmutabilityPolicyMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ + Specifies the immutability policy mode to set on the blob. + "Mutable" can only be returned by service, don't set to "Mutable". + """ + + UNLOCKED = "Unlocked" + LOCKED = "Locked" + MUTABLE = "Mutable" + + +class BlobAnalyticsLogging(GeneratedLogging): + """Azure Analytics Logging settings. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool delete: + Indicates whether all delete requests should be logged. The default value is `False`. + :keyword bool read: + Indicates whether all read requests should be logged. The default value is `False`. + :keyword bool write: + Indicates whether all write requests should be logged. The default value is `False`. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.delete = kwargs.get('delete', False) + self.read = kwargs.get('read', False) + self.write = kwargs.get('write', False) + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + delete=generated.delete, + read=generated.read, + write=generated.write, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class Metrics(GeneratedMetrics): + """A summary of request statistics grouped by API in hour or minute aggregates + for blobs. + + :keyword str version: + The version of Storage Analytics to configure. The default value is 1.0. + :keyword bool enabled: + Indicates whether metrics are enabled for the Blob service. + The default value is `False`. + :keyword bool include_apis: + Indicates whether metrics should generate summary statistics for called API operations. + :keyword ~azure.storage.blob.RetentionPolicy retention_policy: + Determines how long the associated data should persist. If not specified the retention + policy will be disabled by default. + """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + :param bool enabled: + Indicates whether a retention policy is enabled for the storage service. + The default value is False. + :param int days: + Indicates the number of days that metrics or logging or + soft-deleted data should be retained. 
+        be deleted. If enabled=True, the number of days must be specified.
+    """
+
+    def __init__(self, enabled=False, days=None):
+        super(RetentionPolicy, self).__init__(enabled=enabled, days=days, allow_permanent_delete=None)
+        if self.enabled and (self.days is None):
+            raise ValueError("If policy is enabled, 'days' must be specified.")
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            days=generated.days,
+        )
+
+
+class StaticWebsite(GeneratedStaticWebsite):
+    """The properties that enable an account to host a static website.
+
+    :keyword bool enabled:
+        Indicates whether this account is hosting a static website.
+        The default value is `False`.
+    :keyword str index_document:
+        The default name of the index page under each directory.
+    :keyword str error_document404_path:
+        The absolute path of the custom 404 page.
+    :keyword str default_index_document_path:
+        Absolute path of the default index page.
+    """
+
+    def __init__(self, **kwargs):
+        self.enabled = kwargs.get('enabled', False)
+        if self.enabled:
+            self.index_document = kwargs.get('index_document')
+            self.error_document404_path = kwargs.get('error_document404_path')
+            self.default_index_document_path = kwargs.get('default_index_document_path')
+        else:
+            self.index_document = None
+            self.error_document404_path = None
+            self.default_index_document_path = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        if not generated:
+            return cls()
+        return cls(
+            enabled=generated.enabled,
+            index_document=generated.index_document,
+            error_document404_path=generated.error_document404_path,
+            default_index_document_path=generated.default_index_document_path
+        )
+
+
+class CorsRule(GeneratedCorsRule):
+    """CORS is an HTTP feature that enables a web application running under one
+    domain to access resources in another domain. Web browsers implement a
+    security restriction known as same-origin policy that prevents a web page
+    from calling APIs in a different domain; CORS provides a secure way to
+    allow one domain (the origin domain) to call APIs in another domain.
+
+    :param list(str) allowed_origins:
+        A list of origin domains that will be allowed via CORS, or "*" to allow
+        all domains. The list must contain at least one entry. Limited to 64
+        origin domains. Each allowed origin can have up to 256 characters.
+    :param list(str) allowed_methods:
+        A list of HTTP methods that are allowed to be executed by the origin.
+        The list must contain at least one entry. For Azure Storage,
+        permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT.
+    :keyword list(str) allowed_headers:
+        Defaults to an empty list. A list of headers allowed to be part of
+        the cross-origin request. Limited to 64 defined headers and two prefixed
+        headers. Each header can be up to 256 characters.
+    :keyword list(str) exposed_headers:
+        Defaults to an empty list. A list of response headers to expose to CORS
+        clients. Limited to 64 defined headers and two prefixed headers. Each
+        header can be up to 256 characters.
+    :keyword int max_age_in_seconds:
+        The number of seconds that the client/browser should cache a
+        preflight response.
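+
+    Example (a minimal sketch; a rule like this is typically passed to the blob
+    service client's ``set_service_properties`` in its ``cors`` list)::
+
+        allow_gets = CorsRule(['www.xyz.com'], ['GET'], max_age_in_seconds=60)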
+ """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ContainerProperties(DictMixin): + """Blob container's properties class. + + Returned ``ContainerProperties`` instances expose these values through a + dictionary interface, for example: ``container_props["last_modified"]``. + Additionally, the container name is available as ``container_props["name"]``. + + :ivar str name: + Name of the container. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the container was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar ~azure.storage.blob.LeaseProperties lease: + Stores all the lease information for the container. + :ivar str public_access: Specifies whether data in the container may be accessed + publicly and the level of access. + :ivar bool has_immutability_policy: + Represents whether the container has an immutability policy. + :ivar bool has_legal_hold: + Represents whether the container has a legal hold. + :ivar bool immutable_storage_with_versioning_enabled: + Represents whether immutable storage with versioning enabled on the container. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :ivar dict metadata: A dict with name-value pairs to associate with the + container as metadata. + :ivar ~azure.storage.blob.ContainerEncryptionScope encryption_scope: + The default encryption scope configuration for the container. + :ivar bool deleted: + Whether this container was deleted. + :ivar str version: + The version of a deleted container. 
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.lease = LeaseProperties(**kwargs) + self.public_access = kwargs.get('x-ms-blob-public-access') + self.has_immutability_policy = kwargs.get('x-ms-has-immutability-policy') + self.deleted = None + self.version = None + self.has_legal_hold = kwargs.get('x-ms-has-legal-hold') + self.metadata = kwargs.get('metadata') + self.encryption_scope = None + self.immutable_storage_with_versioning_enabled = kwargs.get('x-ms-immutable-storage-with-versioning-enabled') + default_encryption_scope = kwargs.get('x-ms-default-encryption-scope') + if default_encryption_scope: + self.encryption_scope = ContainerEncryptionScope( + default_encryption_scope=default_encryption_scope, + prevent_encryption_scope_override=kwargs.get('x-ms-deny-encryption-scope-override', False) + ) + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.public_access = generated.properties.public_access + props.has_immutability_policy = generated.properties.has_immutability_policy + props.immutable_storage_with_versioning_enabled = \ + generated.properties.is_immutable_storage_with_versioning_enabled + props.deleted = generated.deleted + props.version = generated.version + props.has_legal_hold = generated.properties.has_legal_hold + props.metadata = generated.metadata + props.encryption_scope = ContainerEncryptionScope._from_generated(generated) #pylint: disable=protected-access + return props + + +class ContainerPropertiesPaged(PageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(ContainerPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [self._build_item(item) for item in self._response.container_items] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + return ContainerProperties._from_generated(item) # pylint: disable=protected-access + + +class ImmutabilityPolicy(DictMixin): + """Optional parameters for setting the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime expiry_time: + Specifies the date time when the blobs immutability policy is set to expire. + :keyword str or ~azure.storage.blob.BlobImmutabilityPolicyMode policy_mode: + Specifies the immutability policy mode to set on the blob. + Possible values to set include: "Locked", "Unlocked". + "Mutable" can only be returned by service, don't set to "Mutable". + """ + + def __init__(self, **kwargs): + self.expiry_time = kwargs.pop('expiry_time', None) + self.policy_mode = kwargs.pop('policy_mode', None) + + @classmethod + def _from_generated(cls, generated): + immutability_policy = cls() + immutability_policy.expiry_time = generated.properties.immutability_policy_expires_on + immutability_policy.policy_mode = generated.properties.immutability_policy_mode + return immutability_policy + + +class BlobProperties(DictMixin): + """ + Blob Properties. + + :ivar str name: + The name of the blob. + :ivar str container: + The container in which the blob resides. + :ivar str snapshot: + Datetime value that uniquely identifies the blob snapshot. + :ivar ~azure.blob.storage.BlobType blob_type: + String indicating this blob's type. + :ivar dict metadata: + Name-value pairs associated with the blob as metadata. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the blob was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int size: + The size of the content returned. If the entire blob was requested, + the length of blob in bytes. If a subset of the blob was requested, the + length of the returned subset. + :ivar str content_range: + Indicates the range of bytes returned in the event that the client + requested a subset of the blob. + :ivar int append_blob_committed_block_count: + (For Append Blobs) Number of committed blocks in the blob. + :ivar bool is_append_blob_sealed: + Indicate if the append blob is sealed or not. + + .. 
+
+    :ivar int page_blob_sequence_number:
+        (For Page Blobs) Sequence number for page blob used for coordinating
+        concurrent writes.
+    :ivar bool server_encrypted:
+        Set to true if the blob is encrypted on the server.
+    :ivar ~azure.storage.blob.CopyProperties copy:
+        Stores all the copy properties for the blob.
+    :ivar ~azure.storage.blob.ContentSettings content_settings:
+        Stores all the content settings for the blob.
+    :ivar ~azure.storage.blob.LeaseProperties lease:
+        Stores all the lease information for the blob.
+    :ivar ~azure.storage.blob.StandardBlobTier blob_tier:
+        Indicates the access tier of the blob. The hot tier is optimized
+        for storing data that is accessed frequently. The cool storage tier
+        is optimized for storing data that is infrequently accessed and stored
+        for at least a month. The archive tier is optimized for storing
+        data that is rarely accessed and stored for at least six months
+        with flexible latency requirements.
+    :ivar str rehydrate_priority:
+        Indicates the priority with which to rehydrate an archived blob.
+    :ivar ~datetime.datetime blob_tier_change_time:
+        Indicates when the access tier was last changed.
+    :ivar bool blob_tier_inferred:
+        Indicates whether the access tier was inferred by the service.
+        If false, it indicates that the tier was set explicitly.
+    :ivar bool deleted:
+        Whether this blob was deleted.
+    :ivar ~datetime.datetime deleted_time:
+        A datetime object representing the time at which the blob was deleted.
+    :ivar int remaining_retention_days:
+        The number of days that the blob will be retained before being permanently deleted by the service.
+    :ivar ~datetime.datetime creation_time:
+        Indicates when the blob was created, in UTC.
+    :ivar str archive_status:
+        Archive status of the blob.
+    :ivar str encryption_key_sha256:
+        The SHA-256 hash of the provided encryption key.
+    :ivar str encryption_scope:
+        A predefined encryption scope used to encrypt the data on the service. An encryption
+        scope can be created using the Management API and referenced here by name. If a default
+        encryption scope has been defined at the container, this value will override it if the
+        container-level scope is configured to allow overrides. Otherwise an error will be raised.
+    :ivar bool request_server_encrypted:
+        Whether this blob is encrypted.
+    :ivar list(~azure.storage.blob.ObjectReplicationPolicy) object_replication_source_properties:
+        Only present for blobs that have policy ids and rule ids applied to them.
+
+        .. versionadded:: 12.4.0
+
+    :ivar str object_replication_destination_policy:
+        Represents the Object Replication Policy Id that created this blob.
+
+        .. versionadded:: 12.4.0
+
+    :ivar ~datetime.datetime last_accessed_on:
+        Indicates when the last Read/Write operation was performed on a Blob.
+
+        .. versionadded:: 12.6.0
+
+    :ivar int tag_count:
+        The number of tags on this blob.
+
+        .. versionadded:: 12.4.0
+
+    :ivar dict(str, str) tags:
+        Key-value pairs of tags on this blob.
+
+        .. versionadded:: 12.4.0
+    :ivar bool has_versions_only:
+        A true value indicates the root blob is deleted.
+
+        .. versionadded:: 12.10.0
+
+    :ivar ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+        Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+        .. versionadded:: 12.10.0
+            This was introduced in API version '2020-10-02'.
+
+    :ivar bool has_legal_hold:
+        Specifies whether a legal hold is set on the blob.
+        Currently this parameter of the upload_blob() API applies to BlockBlob only.
+
+        ..
versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.container = None + self.snapshot = kwargs.get('x-ms-snapshot') + self.version_id = kwargs.get('x-ms-version-id') + self.is_current_version = kwargs.get('x-ms-is-current-version') + self.blob_type = BlobType(kwargs['x-ms-blob-type']) if kwargs.get('x-ms-blob-type') else None + self.metadata = kwargs.get('metadata') + self.encrypted_metadata = kwargs.get('encrypted_metadata') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.append_blob_committed_block_count = kwargs.get('x-ms-blob-committed-block-count') + self.is_append_blob_sealed = kwargs.get('x-ms-blob-sealed') + self.page_blob_sequence_number = kwargs.get('x-ms-blob-sequence-number') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.blob_tier = kwargs.get('x-ms-access-tier') + self.rehydrate_priority = kwargs.get('x-ms-rehydrate-priority') + self.blob_tier_change_time = kwargs.get('x-ms-access-tier-change-time') + self.blob_tier_inferred = kwargs.get('x-ms-access-tier-inferred') + self.deleted = False + self.deleted_time = None + self.remaining_retention_days = None + self.creation_time = kwargs.get('x-ms-creation-time') + self.archive_status = kwargs.get('x-ms-archive-status') + self.encryption_key_sha256 = kwargs.get('x-ms-encryption-key-sha256') + self.encryption_scope = kwargs.get('x-ms-encryption-scope') + self.request_server_encrypted = kwargs.get('x-ms-server-encrypted') + self.object_replication_source_properties = kwargs.get('object_replication_source_properties') + self.object_replication_destination_policy = kwargs.get('x-ms-or-policy-id') + self.last_accessed_on = kwargs.get('x-ms-last-access-time') + self.tag_count = kwargs.get('x-ms-tag-count') + self.tags = None + self.immutability_policy = ImmutabilityPolicy(expiry_time=kwargs.get('x-ms-immutability-policy-until-date'), + policy_mode=kwargs.get('x-ms-immutability-policy-mode')) + self.has_legal_hold = kwargs.get('x-ms-legal-hold') + self.has_versions_only = None + + +class FilteredBlob(DictMixin): + """Blob info from a Filter Blobs API call. + + :ivar name: Blob name + :type name: str + :ivar container_name: Container name. + :type container_name: str + :ivar tags: Key value pairs of blob tags. + :type tags: Dict[str, str] + """ + def __init__(self, **kwargs): + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + self.tags = kwargs.get('tags', None) + + +class LeaseProperties(DictMixin): + """Blob Lease Properties. + + :ivar str status: + The lease status of the blob. Possible values: locked|unlocked + :ivar str state: + Lease state of the blob. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a blob is leased, specifies whether the lease is of infinite or fixed duration. 
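+
+    Example (illustrative, assuming an existing ``blob_client``)::
+
+        lease = blob_client.get_blob_properties().lease
+        print(lease.status, lease.state, lease.duration)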
+ """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """The content settings of a blob. + + :param str content_type: + The content type specified for the blob. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the blob, that value is stored. + :param str content_language: + If the content_language has previously been set + for the blob, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the blob, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the blob, that value is stored. + :param bytearray content_md5: + If the content_md5 has been set for the blob, this response + header is stored so that the client can check for message content + integrity. + """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class CopyProperties(DictMixin): + """Blob Copy Properties. + + These properties will be `None` if this blob has never been the destination + in a Copy Blob operation, or if this blob has been modified after a concluded + Copy Blob operation, for example, using Set Blob Properties, Upload Blob, or Commit Block List. + + :ivar str id: + String identifier for the last attempted Copy Blob operation where this blob + was the destination blob. + :ivar str source: + URL up to 2 KB in length that specifies the source blob used in the last attempted + Copy Blob operation where this blob was the destination blob. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. 
Check copy_status_description if intermittent,
+                non-fatal errors impede copy progress but don't cause failure.
+            aborted:
+                Copy was ended by Abort Copy Blob.
+            failed:
+                Copy failed. See copy_status_description for failure details.
+    :ivar str progress:
+        Contains the number of bytes copied and the total bytes in the source in the last
+        attempted Copy Blob operation where this blob was the destination blob. Can show
+        between 0 and Content-Length bytes copied.
+    :ivar ~datetime.datetime completion_time:
+        Conclusion time of the last attempted Copy Blob operation where this blob was the
+        destination blob. This value can specify the time of a completed, aborted, or
+        failed copy attempt.
+    :ivar str status_description:
+        Only appears when x-ms-copy-status is failed or pending. Describes the cause of a fatal
+        or non-fatal copy operation failure.
+    :ivar bool incremental_copy:
+        Copies the snapshot of the source page blob to a destination page blob.
+        The snapshot is copied such that only the differential changes since the
+        previously copied snapshot are transferred to the destination.
+    :ivar ~datetime.datetime destination_snapshot:
+        Included if the blob is an incremental copy blob or incremental copy snapshot,
+        if x-ms-copy-status is success. Snapshot time of the last successful
+        incremental copy snapshot for this blob.
+    """
+
+    def __init__(self, **kwargs):
+        self.id = kwargs.get('x-ms-copy-id')
+        self.source = kwargs.get('x-ms-copy-source')
+        self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+        self.progress = kwargs.get('x-ms-copy-progress')
+        # The response header name uses hyphens throughout; 'x-ms-copy-completion_time' would never match.
+        self.completion_time = kwargs.get('x-ms-copy-completion-time')
+        self.status_description = kwargs.get('x-ms-copy-status-description')
+        self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+        self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+    @classmethod
+    def _from_generated(cls, generated):
+        copy = cls()
+        copy.id = generated.properties.copy_id or None
+        copy.status = get_enum_value(generated.properties.copy_status) or None
+        copy.source = generated.properties.copy_source or None
+        copy.progress = generated.properties.copy_progress or None
+        copy.completion_time = generated.properties.copy_completion_time or None
+        copy.status_description = generated.properties.copy_status_description or None
+        copy.incremental_copy = generated.properties.incremental_copy or None
+        copy.destination_snapshot = generated.properties.destination_snapshot or None
+        return copy
+
+
+class BlobBlock(DictMixin):
+    """BlockBlob Block class.
+
+    :param str block_id:
+        Block id.
+    :param str state:
+        Block state. Possible values: committed|uncommitted
+    :ivar int size:
+        Block size in bytes.
+    """
+
+    def __init__(self, block_id, state=BlockState.Latest):
+        self.id = block_id
+        self.state = state
+        self.size = None
+
+    @classmethod
+    def _from_generated(cls, generated):
+        try:
+            decoded_bytes = decode_base64_to_bytes(generated.name)
+            block_id = decoded_bytes.decode('utf-8')
+        # This works around a bug: when large blocks are uploaded through upload_blob, the block id
+        # isn't base64 encoded, even though the service expects a base64-encoded block id. So if the
+        # block id we receive cannot be base64-decoded, it was not encoded when the block was staged,
+        # and we use the returned block_id directly.
+        except UnicodeDecodeError:
+            block_id = generated.name
+        block = cls(block_id)
+        block.size = generated.size
+        return block
+
+
+class PageRange(DictMixin):
+    """Page Range for page blob.
+ + :param int start: + Start of page range in bytes. + :param int end: + End of page range in bytes. + :ivar bool cleared: + Whether the range has been cleared. + """ + + def __init__(self, start=None, end=None, *, cleared=False): + self.start = start + self.end = end + self.cleared = cleared + + +class PageRangePaged(PageIterator): + def __init__(self, command, results_per_page=None, continuation_token=None): + super(PageRangePaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.current_page = self._build_page(self._response) + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_page(response): + if not response: + raise StopIteration + + return parse_page_list(response) + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get access policy methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. 
If
+        omitted, start time for this call is assumed to be the time when the
+        storage service receives the request. Azure will always convert values
+        to UTC. If a date is passed in without timezone info, it is assumed to
+        be UTC.
+    :type start: ~datetime.datetime or str
+    """
+    def __init__(self, permission=None, expiry=None, start=None):
+        self.start = start
+        self.expiry = expiry
+        self.permission = permission
+
+
+class ContainerSasPermissions(object):
+    """ContainerSasPermissions class to be used with the
+    :func:`~azure.storage.blob.generate_container_sas` function and
+    for the AccessPolicies used with
+    :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`.
+
+    :param bool read:
+        Read the content, properties, metadata or block list of any blob in the
+        container. Use any blob in the container as the source of a copy operation.
+    :param bool write:
+        For any blob in the container, create or write content, properties,
+        metadata, or block list. Snapshot or lease the blob. Resize the blob
+        (page blob only). Use the blob as the destination of a copy operation
+        within the same account. Note: You cannot grant permissions to read or
+        write container properties or metadata, nor to lease a container, with
+        a container SAS. Use an account SAS instead.
+    :param bool delete:
+        Delete any blob in the container. Note: You cannot grant permissions to
+        delete a container with a container SAS. Use an account SAS instead.
+    :param bool delete_previous_version:
+        Delete the previous blob version for a versioning-enabled storage account.
+    :param bool list:
+        List blobs in the container.
+    :param bool tag:
+        Set or get tags on the blobs in the container.
+    :keyword bool add:
+        Add a block to an append blob.
+    :keyword bool create:
+        Write a new blob, snapshot a blob, or copy a blob to a new blob.
+    :keyword bool permanent_delete:
+        Whether permanent delete of blobs is permitted.
+    :keyword bool filter_by_tags:
+        To enable finding blobs by tags.
+    :keyword bool move:
+        Move a blob or a directory and its contents to a new location.
+    :keyword bool execute:
+        Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob.
+    :keyword bool set_immutability_policy:
+        To enable operations related to setting/deleting immutability policy.
+        To get the immutability policy, you only need read permission.
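+
+    Example (a minimal sketch of building a permission set and its equivalent
+    string form)::
+
+        perms = ContainerSasPermissions(read=True, list=True)
+        assert str(perms) == 'rl'
+        assert str(ContainerSasPermissions.from_string('rl')) == 'rl'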
+    """
+    def __init__(self, read=False, write=False, delete=False,
+                 list=False, delete_previous_version=False, tag=False, **kwargs):  # pylint: disable=redefined-builtin
+        self.read = read
+        self.add = kwargs.pop('add', False)
+        self.create = kwargs.pop('create', False)
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.list = list
+        self.tag = tag
+        self.filter_by_tags = kwargs.pop('filter_by_tags', False)
+        self.move = kwargs.pop('move', False)
+        self.execute = kwargs.pop('execute', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('l' if self.list else '') +
+                     ('t' if self.tag else '') +
+                     ('f' if self.filter_by_tags else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('i' if self.set_immutability_policy else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a ContainerSasPermissions from a string.
+
+        To specify read, write, delete, or list permissions you need only
+        include the first letter of the word in the string. For example, for
+        read and write permissions you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, write, delete,
+            and list permissions.
+        :return: A ContainerSasPermissions object.
+        :rtype: ~azure.storage.blob.ContainerSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_list = 'l' in permission
+        p_tag = 't' in permission
+        p_filter_by_tags = 'f' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_set_immutability_policy = 'i' in permission
+        parsed = cls(read=p_read, write=p_write, delete=p_delete, list=p_list,
+                     delete_previous_version=p_delete_previous_version, tag=p_tag, add=p_add,
+                     create=p_create, permanent_delete=p_permanent_delete, filter_by_tags=p_filter_by_tags,
+                     move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy)
+
+        return parsed
+
+
+class BlobSasPermissions(object):
+    """BlobSasPermissions class to be used with the
+    :func:`~azure.storage.blob.generate_blob_sas` function.
+
+    :param bool read:
+        Read the content, properties, metadata and block list. Use the blob as
+        the source of a copy operation.
+    :param bool add:
+        Add a block to an append blob.
+    :param bool create:
+        Write a new blob, snapshot a blob, or copy a blob to a new blob.
+    :param bool write:
+        Create or write content, properties, metadata, or block list. Snapshot
+        or lease the blob. Resize the blob (page blob only). Use the blob as the
+        destination of a copy operation within the same account.
+    :param bool delete:
+        Delete the blob.
+    :param bool delete_previous_version:
+        Delete the previous blob version on a versioning-enabled storage account.
+    :param bool tag:
+        Set or get tags on the blob.
+    :keyword bool permanent_delete:
+        Permits permanent deletion of the blob.
+    :keyword bool move:
+        Move a blob or a directory and its contents to a new location.
+    :keyword bool execute:
+        Get the system properties and, if the hierarchical namespace is enabled for the storage account,
+        get the POSIX ACL of a blob.
+    :keyword bool set_immutability_policy:
+        Permits operations that set or delete the immutability policy.
+        Read permission alone is enough to get the immutability policy.
+    """
+    def __init__(self, read=False, add=False, create=False, write=False,
+                 delete=False, delete_previous_version=False, tag=False, **kwargs):
+        self.read = read
+        self.add = add
+        self.create = create
+        self.write = write
+        self.delete = delete
+        self.delete_previous_version = delete_previous_version
+        self.permanent_delete = kwargs.pop('permanent_delete', False)
+        self.tag = tag
+        self.move = kwargs.pop('move', False)
+        self.execute = kwargs.pop('execute', False)
+        self.set_immutability_policy = kwargs.pop('set_immutability_policy', False)
+        self._str = (('r' if self.read else '') +
+                     ('a' if self.add else '') +
+                     ('c' if self.create else '') +
+                     ('w' if self.write else '') +
+                     ('d' if self.delete else '') +
+                     ('x' if self.delete_previous_version else '') +
+                     ('y' if self.permanent_delete else '') +
+                     ('t' if self.tag else '') +
+                     ('m' if self.move else '') +
+                     ('e' if self.execute else '') +
+                     ('i' if self.set_immutability_policy else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, permission):
+        """Create a BlobSasPermissions from a string.
+
+        To specify read, add, create, write, or delete permissions you need only
+        include the first letter of the word in the string. For example, for
+        read and write permissions you would provide the string "rw".
+
+        :param str permission: The string which dictates the read, add, create,
+            write, or delete permissions.
+        :return: A BlobSasPermissions object.
+        :rtype: ~azure.storage.blob.BlobSasPermissions
+        """
+        p_read = 'r' in permission
+        p_add = 'a' in permission
+        p_create = 'c' in permission
+        p_write = 'w' in permission
+        p_delete = 'd' in permission
+        p_delete_previous_version = 'x' in permission
+        p_permanent_delete = 'y' in permission
+        p_tag = 't' in permission
+        p_move = 'm' in permission
+        p_execute = 'e' in permission
+        p_set_immutability_policy = 'i' in permission
+
+        parsed = cls(read=p_read, add=p_add, create=p_create, write=p_write, delete=p_delete,
+                     delete_previous_version=p_delete_previous_version, tag=p_tag, permanent_delete=p_permanent_delete,
+                     move=p_move, execute=p_execute, set_immutability_policy=p_set_immutability_policy)
+
+        return parsed
+
+
+class CustomerProvidedEncryptionKey(object):
+    """
+    All data in Azure Storage is encrypted at-rest using an account-level encryption key.
+    In versions 2019-02-02 and newer, you can manage the key used to encrypt blob contents
+    and application metadata per-blob by providing an AES-256 encryption key in requests to the storage service.
+
+    When you use a customer-provided key, Azure Storage does not manage or persist your key.
+    When writing data to a blob, the provided key is used to encrypt your data before writing it to disk.
+    A SHA-256 hash of the encryption key is written alongside the blob contents,
+    and is used to verify that all subsequent operations against the blob use the same encryption key.
+    This hash cannot be used to retrieve the encryption key or decrypt the contents of the blob.
+    When reading a blob, the provided key is used to decrypt your data after reading it from disk.
+    In both cases, the provided encryption key is securely discarded
+    as soon as the encryption or decryption process completes.
+ + :param str key_value: + Base64-encoded AES-256 encryption key value. + :param str key_hash: + Base64-encoded SHA256 of the encryption key. + :ivar str algorithm: + Specifies the algorithm to use when encrypting data using the given key. Must be AES256. + """ + def __init__(self, key_value, key_hash): + self.key_value = key_value + self.key_hash = key_hash + self.algorithm = 'AES256' + + +class ContainerEncryptionScope(object): + """The default encryption scope configuration for a container. + + This scope is used implicitly for all future writes within the container, + but can be overridden per blob operation. + + .. versionadded:: 12.2.0 + + :param str default_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + :param bool prevent_encryption_scope_override: + If true, prevents any request from specifying a different encryption scope than the scope + set on the container. Default value is false. + """ + + def __init__(self, default_encryption_scope, **kwargs): + self.default_encryption_scope = default_encryption_scope + self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', False) + + @classmethod + def _from_generated(cls, generated): + if generated.properties.default_encryption_scope: + scope = cls( + generated.properties.default_encryption_scope, + prevent_encryption_scope_override=generated.properties.prevent_encryption_scope_override or False + ) + return scope + return None + + +class DelimitedJsonDialect(DictMixin): + """Defines the input or output JSON serialization for a blob data query. + + :keyword str delimiter: The line separator character, default value is '\n' + """ + + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', '\n') + + +class DelimitedTextDialect(DictMixin): + """Defines the input or output delimited (CSV) serialization for a blob query request. + + :keyword str delimiter: + Column separator, defaults to ','. + :keyword str quotechar: + Field quote, defaults to '"'. + :keyword str lineterminator: + Record separator, defaults to '\\\\n'. + :keyword str escapechar: + Escape char, defaults to empty. + :keyword bool has_header: + Whether the blob data includes headers in the first line. The default value is False, meaning that the + data will be returned inclusive of the first line. If set to True, the data will be returned exclusive + of the first line. + """ + def __init__(self, **kwargs): + self.delimiter = kwargs.pop('delimiter', ',') + self.quotechar = kwargs.pop('quotechar', '"') + self.lineterminator = kwargs.pop('lineterminator', '\n') + self.escapechar = kwargs.pop('escapechar', "") + self.has_header = kwargs.pop('has_header', False) + + +class ArrowDialect(ArrowField): + """field of an arrow schema. + + All required parameters must be populated in order to send to Azure. + + :param ~azure.storage.blob.ArrowType type: Arrow field type. + :keyword str name: The name of the field. + :keyword int precision: The precision of the field. + :keyword int scale: The scale of the field. + """ + def __init__(self, type, **kwargs): # pylint: disable=redefined-builtin + super(ArrowDialect, self).__init__(type=type, **kwargs) + + +class ArrowType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + + INT64 = "int64" + BOOL = "bool" + TIMESTAMP_MS = "timestamp[ms]" + STRING = "string" + DOUBLE = "double" + DECIMAL = 'decimal' + + +class ObjectReplicationPolicy(DictMixin): + """Policy id and rule ids applied to a blob. 
+
+    :ivar str policy_id:
+        Policy id for the blob. A replication policy gets created (policy id) when creating a source/destination pair.
+    :ivar list(~azure.storage.blob.ObjectReplicationRule) rules:
+        Within each policy there may be multiple replication rules.
+        e.g. rule 1 = src/container/.pdf to dst/container2/; rule 2 = src/container1/.jpg to dst/container3
+    """
+
+    def __init__(self, **kwargs):
+        self.policy_id = kwargs.pop('policy_id', None)
+        self.rules = kwargs.pop('rules', None)
+
+
+class ObjectReplicationRule(DictMixin):
+    """A single rule (id and status) applied to a blob under an object replication policy.
+
+    :ivar str rule_id:
+        Rule id.
+    :ivar str status:
+        The status of the rule. It can be "Complete" or "Failed".
+    """
+
+    def __init__(self, **kwargs):
+        self.rule_id = kwargs.pop('rule_id', None)
+        self.status = kwargs.pop('status', None)
+
+
+class BlobQueryError(object):
+    """An error that occurred during a quick query operation.
+
+    :ivar str error:
+        The name of the error.
+    :ivar bool is_fatal:
+        If true, this error prevents further query processing. More result data may be returned,
+        but there is no guarantee that all of the original data will be processed.
+        If false, this error does not prevent further query processing.
+    :ivar str description:
+        A description of the error.
+    :ivar int position:
+        The blob offset at which the error occurred.
+    """
+    def __init__(self, error=None, is_fatal=False, description=None, position=None):
+        self.error = error
+        self.is_fatal = is_fatal
+        self.description = description
+        self.position = position
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_quick_query_helper.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_quick_query_helper.py
new file mode 100644
index 00000000000..3164337308c
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_quick_query_helper.py
@@ -0,0 +1,195 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from io import BytesIO
+from typing import Union, Iterable, IO  # pylint: disable=unused-import
+
+from ._shared.avro.datafile import DataFileReader
+from ._shared.avro.avro_io import DatumReader
+
+
+class BlobQueryReader(object):  # pylint: disable=too-many-instance-attributes
+    """A streaming object to read query results.
+
+    :ivar str name:
+        The name of the blob being queried.
+    :ivar str container:
+        The name of the container where the blob is.
+    :ivar dict response_headers:
+        The response_headers of the quick query request.
+    :ivar bytes record_delimiter:
+        The delimiter used to separate lines, or records, within the data. The `records`
+        method will return these lines via a generator.
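+
+    .. admonition:: Example
+
+        A sketch of consuming a reader, where ``reader`` is assumed to be a
+        ``BlobQueryReader`` returned by a blob query call::
+
+            # stream the filtered records one line at a time
+            for record in reader.records():
+                print(record)
+
+            # or, instead, drain everything into a single payload
+            data = reader.readall()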
+    """
+
+    def __init__(
+        self,
+        name=None,
+        container=None,
+        errors=None,
+        record_delimiter='\n',
+        encoding=None,
+        headers=None,
+        response=None,
+        error_cls=None,
+    ):
+        self.name = name
+        self.container = container
+        self.response_headers = headers
+        self.record_delimiter = record_delimiter
+        self._size = 0
+        self._bytes_processed = 0
+        self._errors = errors
+        self._encoding = encoding
+        self._parsed_results = DataFileReader(QuickQueryStreamer(response), DatumReader())
+        self._first_result = self._process_record(next(self._parsed_results))
+        self._error_cls = error_cls
+
+    def __len__(self):
+        return self._size
+
+    def _process_record(self, result):
+        self._size = result.get('totalBytes', self._size)
+        self._bytes_processed = result.get('bytesScanned', self._bytes_processed)
+        if 'data' in result:
+            return result.get('data')
+        if 'fatal' in result:
+            error = self._error_cls(
+                error=result['name'],
+                is_fatal=result['fatal'],
+                description=result['description'],
+                position=result['position']
+            )
+            if self._errors:
+                self._errors(error)
+        return None
+
+    def _iter_stream(self):
+        if self._first_result is not None:
+            yield self._first_result
+        for next_result in self._parsed_results:
+            processed_result = self._process_record(next_result)
+            if processed_result is not None:
+                yield processed_result
+
+    def readall(self):
+        # type: () -> Union[bytes, str]
+        """Return all query results.
+
+        This operation is blocking until all data is downloaded.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :rtype: Union[bytes, str]
+        """
+        stream = BytesIO()
+        self.readinto(stream)
+        data = stream.getvalue()
+        if self._encoding:
+            return data.decode(self._encoding)
+        return data
+
+    def readinto(self, stream):
+        # type: (IO) -> None
+        """Download the query result to a stream.
+
+        :param stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream.
+        :returns: None
+        """
+        for record in self._iter_stream():
+            stream.write(record)
+
+    def records(self):
+        # type: () -> Iterable[Union[bytes, str]]
+        """Returns a record generator for the query result.
+
+        Records will be returned line by line.
+        If an encoding has been configured, it will be used to decode individual
+        records as they are received.
+
+        :rtype: Iterable[Union[bytes, str]]
+        """
+        delimiter = self.record_delimiter.encode('utf-8')
+        for record_chunk in self._iter_stream():
+            for record in record_chunk.split(delimiter):
+                if self._encoding:
+                    yield record.decode(self._encoding)
+                else:
+                    yield record
+
+
+class QuickQueryStreamer(object):
+    """
+    File-like streaming iterator.
+    """
+
+    def __init__(self, generator):
+        self.generator = generator
+        self.iterator = iter(generator)
+        self._buf = b""
+        self._point = 0
+        self._download_offset = 0
+        self._buf_start = 0
+        self.file_length = None
+
+    def __len__(self):
+        return self.file_length
+
+    def __iter__(self):
+        return self.iterator
+
+    @staticmethod
+    def seekable():
+        return True
+
+    def __next__(self):
+        next_part = next(self.iterator)
+        self._download_offset += len(next_part)
+        return next_part
+
+    next = __next__  # Python 2 compatibility.
+
+    def tell(self):
+        return self._point
+
+    def seek(self, offset, whence=0):
+        if whence == 0:
+            self._point = offset
+        elif whence == 1:
+            self._point += offset
+        else:
+            raise ValueError("whence must be 0 or 1")
+        if self._point < 0:
+            self._point = 0  # XXX is this right?
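+
+    # The read() below keeps a 16-byte tail of already-consumed data in the
+    # buffer, presumably sized to the Avro sync marker (SYNC_SIZE = 16 in
+    # datafile.py) so that small backwards seeks issued while parsing the
+    # Avro stream still land inside the buffer.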
+
+    def read(self, size):
+        try:
+            # keep reading from the generator until the buffer of this stream has enough data to read
+            while self._point + size > self._download_offset:
+                self._buf += self.__next__()
+        except StopIteration:
+            self.file_length = self._download_offset
+
+        start_point = self._point
+
+        # EOF
+        self._point = min(self._point + size, self._download_offset)
+
+        relative_start = start_point - self._buf_start
+        if relative_start < 0:
+            raise ValueError("Buffer has dumped too much data")
+        relative_end = relative_start + size
+        data = self._buf[relative_start: relative_end]
+
+        # dump the extra data in buffer
+        # buffer start--------------------16bytes----current read position
+        dumped_size = max(relative_end - 16 - relative_start, 0)
+        self._buf_start += dumped_size
+        self._buf = self._buf[dumped_size:]
+
+        return data
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_serialize.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_serialize.py
new file mode 100644
index 00000000000..ce1f2746fa2
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_serialize.py
@@ -0,0 +1,219 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import (  # pylint: disable=unused-import
+    Any, Dict, Optional, Tuple, Union,
+    TYPE_CHECKING)
+
+try:
+    from urllib.parse import quote
+except ImportError:
+    from urllib2 import quote  # type: ignore
+
+from azure.core import MatchConditions
+
+from ._models import (
+    ContainerEncryptionScope,
+    DelimitedJsonDialect)
+from ._generated.models import (
+    ModifiedAccessConditions,
+    SourceModifiedAccessConditions,
+    CpkScopeInfo,
+    ContainerCpkScopeInfo,
+    QueryFormat,
+    QuerySerialization,
+    DelimitedTextConfiguration,
+    JsonTextConfiguration,
+    ArrowConfiguration,
+    QueryFormatType,
+    BlobTag,
+    BlobTags, LeaseAccessConditions
+)
+
+if TYPE_CHECKING:
+    from ._lease import BlobLeaseClient
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2020-12-06',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02'
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if kwargs.get(etag_param):
+            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+    else:
+        raise TypeError("Invalid match condition: 
{}".format(match_condition)) + return if_match, if_none_match + + +def get_access_conditions(lease): + # type: (Optional[Union[BlobLeaseClient, str]]) -> Union[LeaseAccessConditions, None] + try: + lease_id = lease.id # type: ignore + except AttributeError: + lease_id = lease # type: ignore + return LeaseAccessConditions(lease_id=lease_id) if lease_id else None + + +def get_modify_conditions(kwargs): + # type: (Dict[str, Any]) -> ModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'match_condition', 'etag') + return ModifiedAccessConditions( + if_modified_since=kwargs.pop('if_modified_since', None), + if_unmodified_since=kwargs.pop('if_unmodified_since', None), + if_match=if_match or kwargs.pop('if_match', None), + if_none_match=if_none_match or kwargs.pop('if_none_match', None), + if_tags=kwargs.pop('if_tags_match_condition', None) + ) + + +def get_source_conditions(kwargs): + # type: (Dict[str, Any]) -> SourceModifiedAccessConditions + if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag') + return SourceModifiedAccessConditions( + source_if_modified_since=kwargs.pop('source_if_modified_since', None), + source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None), + source_if_match=if_match or kwargs.pop('source_if_match', None), + source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None), + source_if_tags=kwargs.pop('source_if_tags_match_condition', None) + ) + + +def get_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> CpkScopeInfo + if 'encryption_scope' in kwargs: + return CpkScopeInfo(encryption_scope=kwargs.pop('encryption_scope')) + return None + + +def get_container_cpk_scope_info(kwargs): + # type: (Dict[str, Any]) -> ContainerCpkScopeInfo + encryption_scope = kwargs.pop('container_encryption_scope', None) + if encryption_scope: + if isinstance(encryption_scope, ContainerEncryptionScope): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope.default_encryption_scope, + prevent_encryption_scope_override=encryption_scope.prevent_encryption_scope_override + ) + if isinstance(encryption_scope, dict): + return ContainerCpkScopeInfo( + default_encryption_scope=encryption_scope['default_encryption_scope'], + prevent_encryption_scope_override=encryption_scope.get('prevent_encryption_scope_override') + ) + raise TypeError("Container encryption scope must be dict or type ContainerEncryptionScope.") + return None + + +def get_api_version(kwargs): + # type: (Dict[str, Any]) -> str + api_version = kwargs.get('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. 
Please select from:\n{}".format(api_version, versions)) + return api_version or _SUPPORTED_API_VERSIONS[-1] + + +def serialize_blob_tags_header(tags=None): + # type: (Optional[Dict[str, str]]) -> str + if tags is None: + return None + + components = list() + if tags: + for key, value in tags.items(): + components.append(quote(key, safe='.-')) + components.append('=') + components.append(quote(value, safe='.-')) + components.append('&') + + if components: + del components[-1] + + return ''.join(components) + + +def serialize_blob_tags(tags=None): + # type: (Optional[Dict[str, str]]) -> Union[BlobTags, None] + tag_list = list() + if tags: + tag_list = [BlobTag(key=k, value=v) for k, v in tags.items()] + return BlobTags(blob_tag_set=tag_list) + + +def serialize_query_format(formater): + if formater == "ParquetDialect": + qq_format = QueryFormat( + type=QueryFormatType.PARQUET, + parquet_text_configuration=' ' + ) + elif isinstance(formater, DelimitedJsonDialect): + serialization_settings = JsonTextConfiguration( + record_separator=formater.delimiter + ) + qq_format = QueryFormat( + type=QueryFormatType.json, + json_text_configuration=serialization_settings) + elif hasattr(formater, 'quotechar'): # This supports a csv.Dialect as well + try: + headers = formater.has_header + except AttributeError: + headers = False + serialization_settings = DelimitedTextConfiguration( + column_separator=formater.delimiter, + field_quote=formater.quotechar, + record_separator=formater.lineterminator, + escape_char=formater.escapechar, + headers_present=headers + ) + qq_format = QueryFormat( + type=QueryFormatType.delimited, + delimited_text_configuration=serialization_settings + ) + elif isinstance(formater, list): + serialization_settings = ArrowConfiguration( + schema=formater + ) + qq_format = QueryFormat( + type=QueryFormatType.arrow, + arrow_configuration=serialization_settings) + elif not formater: + return None + else: + raise TypeError("Format must be DelimitedTextDialect or DelimitedJsonDialect or ParquetDialect.") + return QuerySerialization(format=qq_format) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/__init__.py new file mode 100644 index 00000000000..a8b1a27d48f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/__init__.py @@ -0,0 +1,54 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, str): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, str): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, str): + key = key.encode('utf-8') + if isinstance(string_to_sign, str): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/authentication.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/authentication.py new file mode 100644 index 00000000000..71d103cac92 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/authentication.py @@ -0,0 +1,188 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import re +from typing import List, Tuple +from urllib.parse import unquote, urlparse + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . import sign_string + +logger = logging.getLogger(__name__) + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + return desired_type(msg) + +# This method attempts to emulate the sorting done by the service +def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]: + # Define the custom alphabet for weights + custom_weights = "-!#$%&*.^_|~+\"\'(),/`~0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz{}" + + # Build dict of tuples and list of keys + header_dict = dict() + header_keys = [] + for k, v in input_headers: + header_dict[k] = v + header_keys.append(k) + + # Sort according to custom defined weights + try: + header_keys = sorted(header_keys, key=lambda word: [custom_weights.index(c) for c in word]) + except ValueError: + raise ValueError("Illegal character encountered when sorting headers.") + + # Build list of sorted tuples + sorted_headers = [] + for key in header_keys: + sorted_headers.append((key, header_dict.get(key))) + return sorted_headers + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. 
+ In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. + """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers = _storage_header_sort(x_ms_headers) + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = sign_string(self.account_key, string_to_sign) + auth_string = 'SharedKey ' + self.account_name + ':' + signature + request.http_request.headers['Authorization'] = auth_string + except Exception as ex: + # Wrap any error that occurred as signing error + # Doing so will clarify/locate the source of problem + raise _wrap_exception(ex, AzureSigningError) + + def on_request(self, request): + string_to_sign = \ + self._get_verb(request) + \ + self._get_headers( + request, + [ + 'content-encoding', 'content-language', 'content-length', + 'content-md5', 'content-type', 'date', 'if-modified-since', + 'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range' + ] + ) + \ + self._get_canonicalized_headers(request) + \ + self._get_canonicalized_resource(request) + \ + self._get_canonicalized_resource_query(request) + + self._add_authorization_header(request, string_to_sign) + # logger.debug("String_to_sign=%s", string_to_sign) + + +class StorageHttpChallenge(object): + def __init__(self, challenge): + """ Parses an HTTP WWW-Authentication Bearer challenge from the Storage service. 
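+
+        An illustrative challenge value (the URIs here are examples only, not
+        taken from a live response) might look like::
+
+            Bearer authorization_uri=https://login.microsoftonline.com/<tenant-id>/oauth2/authorize resource_id=https://storage.azure.com
+
+        from which ``tenant_id`` is recovered as the first path segment of
+        ``authorization_uri``.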
""" + if not challenge: + raise ValueError("Challenge cannot be empty") + + self._parameters = {} + self.scheme, trimmed_challenge = challenge.strip().split(" ", 1) + + # name=value pairs either comma or space separated with values possibly being + # enclosed in quotes + for item in re.split('[, ]', trimmed_challenge): + comps = item.split("=") + if len(comps) == 2: + key = comps[0].strip(' "') + value = comps[1].strip(' "') + if key: + self._parameters[key] = value + + # Extract and verify required parameters + self.authorization_uri = self._parameters.get('authorization_uri') + if not self.authorization_uri: + raise ValueError("Authorization Uri not found") + + self.resource_id = self._parameters.get('resource_id') + if not self.resource_id: + raise ValueError("Resource id not found") + + uri_path = urlparse(self.authorization_uri).path.lstrip("/") + self.tenant_id = uri_path.split("/")[0] + + def get_value(self, key): + return self._parameters.get(key) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/__init__.py new file mode 100644 index 00000000000..5b396cd202e --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io.py new file mode 100644 index 00000000000..93a5c134849 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io.py @@ -0,0 +1,464 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. 
+"""
+
+import json
+import logging
+import struct
+import sys
+
+from ..avro import schema
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+STRUCT_FLOAT = struct.Struct('<f')  # little-endian float
+STRUCT_DOUBLE = struct.Struct('<d')  # little-endian double
+
+# ------------------------------------------------------------------------------
+# Exceptions
+
+
+class SchemaResolutionException(schema.AvroException):
+    def __init__(self, fail_msg, writer_schema=None):
+        pretty_writers = json.dumps(json.loads(str(writer_schema)), indent=2)
+        if writer_schema:
+            fail_msg += "\nWriter's Schema: %s" % pretty_writers
+        schema.AvroException.__init__(self, fail_msg)
+
+# ------------------------------------------------------------------------------
+# Decoder
+
+
+class BinaryDecoder(object):
+    """Read leaf values."""
+
+    def __init__(self, reader):
+        """
+        reader is a Python object on which we can call read, seek, and tell.
+        """
+        self._reader = reader
+
+    @property
+    def reader(self):
+        """Reports the reader used by this decoder."""
+        return self._reader
+
+    def read(self, n):
+        """Read n bytes.
+
+        Args:
+          n: Number of bytes to read.
+        Returns:
+          The next n bytes from the input.
+        """
+        assert (n >= 0), n
+        input_bytes = self.reader.read(n)
+        if n > 0 and not input_bytes:
+            raise StopIteration
+        assert (len(input_bytes) == n), input_bytes
+        return input_bytes
+
+    @staticmethod
+    def read_null():
+        """
+        null is written as zero bytes
+        """
+        return None
+
+    def read_boolean(self):
+        """
+        a boolean is written as a single byte
+        whose value is either 0 (false) or 1 (true).
+        """
+        b = ord(self.read(1))
+        if b == 1:
+            return True
+        if b == 0:
+            return False
+        fail_msg = "Invalid value for boolean: %s" % b
+        raise schema.AvroException(fail_msg)
+
+    def read_int(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        return self.read_long()
+
+    def read_long(self):
+        """
+        int and long values are written using variable-length, zig-zag coding.
+        """
+        b = ord(self.read(1))
+        n = b & 0x7F
+        shift = 7
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+            n |= (b & 0x7F) << shift
+            shift += 7
+        datum = (n >> 1) ^ -(n & 1)
+        return datum
+
+    def read_float(self):
+        """
+        A float is written as 4 bytes.
+        The float is converted into a 32-bit integer using a method equivalent to
+        Java's floatToIntBits and then encoded in little-endian format.
+        """
+        return STRUCT_FLOAT.unpack(self.read(4))[0]
+
+    def read_double(self):
+        """
+        A double is written as 8 bytes.
+        The double is converted into a 64-bit integer using a method equivalent to
+        Java's doubleToLongBits and then encoded in little-endian format.
+        """
+        return STRUCT_DOUBLE.unpack(self.read(8))[0]
+
+    def read_bytes(self):
+        """
+        Bytes are encoded as a long followed by that many bytes of data.
+        """
+        nbytes = self.read_long()
+        assert (nbytes >= 0), nbytes
+        return self.read(nbytes)
+
+    def read_utf8(self):
+        """
+        A string is encoded as a long followed by
+        that many bytes of UTF-8 encoded character data.
+        """
+        input_bytes = self.read_bytes()
+        if PY3:
+            try:
+                return input_bytes.decode('utf-8')
+            except UnicodeDecodeError as exn:
+                logger.error('Invalid UTF-8 input bytes: %r', input_bytes)
+                raise exn
+        else:
+            # PY2
+            return unicode(input_bytes, "utf-8")  # pylint: disable=undefined-variable
+
+    def skip_null(self):
+        pass
+
+    def skip_boolean(self):
+        self.skip(1)
+
+    def skip_int(self):
+        self.skip_long()
+
+    def skip_long(self):
+        b = ord(self.read(1))
+        while (b & 0x80) != 0:
+            b = ord(self.read(1))
+
+    def skip_float(self):
+        self.skip(4)
+
+    def skip_double(self):
+        self.skip(8)
+
+    def skip_bytes(self):
+        self.skip(self.read_long())
+
+    def skip_utf8(self):
+        self.skip_bytes()
+
+    def skip(self, n):
+        self.reader.seek(self.reader.tell() + n)
+
+
+# ------------------------------------------------------------------------------
+# DatumReader
+
+
+class DatumReader(object):
+    """Deserialize Avro-encoded data into a Python data structure."""
+
+    def __init__(self, writer_schema=None):
+        """
+        As defined in the Avro specification, we call the schema encoded
+        in the data the "writer's schema".
+        """
+        self._writer_schema = writer_schema
+
+    # read/write properties
+    def set_writer_schema(self, writer_schema):
+        self._writer_schema = writer_schema
+
+    writer_schema = property(lambda self: self._writer_schema,
+                             set_writer_schema)
+
+    def read(self, decoder):
+        return self.read_data(self.writer_schema, decoder)
+
+    def read_data(self, writer_schema, decoder):
+        # function dispatch for reading data based on type of writer's schema
+        if writer_schema.type == 'null':
+            result = decoder.read_null()
+        elif writer_schema.type == 'boolean':
+            result = decoder.read_boolean()
+        elif writer_schema.type == 'string':
+            result = decoder.read_utf8()
+        elif writer_schema.type == 'int':
+            result = decoder.read_int()
+        elif writer_schema.type == 'long':
+            result = decoder.read_long()
+        elif writer_schema.type == 'float':
+            result = decoder.read_float()
+        elif writer_schema.type == 'double':
+            result = decoder.read_double()
+        elif writer_schema.type == 'bytes':
+            result = decoder.read_bytes()
+        elif writer_schema.type == 'fixed':
+            result = self.read_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = self.read_enum(writer_schema, decoder)
+        elif writer_schema.type == 'array':
+            result = self.read_array(writer_schema, decoder)
+        elif writer_schema.type == 'map':
+            result = self.read_map(writer_schema, decoder)
+        elif writer_schema.type in ['union', 'error_union']:
+            result = self.read_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            result = self.read_record(writer_schema, decoder)
+        else:
+            fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type
+            raise schema.AvroException(fail_msg)
+        return result
+
+    def skip_data(self, writer_schema, decoder):
+        if writer_schema.type == 'null':
+            result = decoder.skip_null()
+        elif writer_schema.type == 'boolean':
+            result = decoder.skip_boolean()
+        elif writer_schema.type == 'string':
+            result = decoder.skip_utf8()
+        elif writer_schema.type == 'int':
+            result = decoder.skip_int()
+        elif writer_schema.type == 'long':
+            result = decoder.skip_long()
+        elif writer_schema.type == 'float':
+            result = decoder.skip_float()
+        elif writer_schema.type == 'double':
+            result = decoder.skip_double()
+        elif writer_schema.type == 'bytes':
+            result = decoder.skip_bytes()
+        elif writer_schema.type == 'fixed':
+            result = self.skip_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = self.skip_enum(decoder)
+        elif writer_schema.type == 'array':
+            self.skip_array(writer_schema, decoder)
+            result = None
+        elif writer_schema.type == 'map':
+            self.skip_map(writer_schema, decoder)
+            result = None
+        elif writer_schema.type in ['union', 'error_union']:
+            result = self.skip_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            self.skip_record(writer_schema, decoder)
+            result = None
+        else:
+            fail_msg = "Unknown schema type: %s" % writer_schema.type
+            raise schema.AvroException(fail_msg)
+        return result
+
+    @staticmethod
+    def read_fixed(writer_schema, decoder):
+        """
+        Fixed instances are encoded using the number of bytes declared
+        in the schema.
+        """
+        return decoder.read(writer_schema.size)
+
+    @staticmethod
+    def skip_fixed(writer_schema, decoder):
+        return decoder.skip(writer_schema.size)
+
+    @staticmethod
+    def read_enum(writer_schema, decoder):
+        """
+        An enum is encoded by an int, representing the zero-based position
+        of the symbol in the schema.
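+
+        For example, against a writer's schema with symbols ['A', 'B', 'C'],
+        an encoded int of 2 deserializes to 'C'.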
+ """ + # read data + index_of_symbol = decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + def skip_enum(decoder): + return decoder.skip_int() + + def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + read_items.append(self.read_data(writer_schema.items, decoder)) + block_count = decoder.read_long() + return read_items + + def skip_array(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + self.skip_data(writer_schema.items, decoder) + block_count = decoder.read_long() + + def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + key = decoder.read_utf8() + read_items[key] = self.read_data(writer_schema.values, decoder) + block_count = decoder.read_long() + return read_items + + def skip_map(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + decoder.skip_utf8() + self.skip_data(writer_schema.values, decoder) + block_count = decoder.read_long() + + def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. 
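+
+        For example, for the union ["null", "string"], a null value is encoded as
+        the single zig-zag long 0 (selecting the "null" branch), while "a" is
+        encoded as the long 1 followed by the string encoding of "a".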
+ """ + # schema resolution + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return self.read_data(selected_writer_schema, decoder) + + def skip_union(self, writer_schema, decoder): + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. + """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io_async.py new file mode 100644 index 00000000000..e9812163795 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/avro_io_async.py @@ -0,0 +1,448 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. 
+ - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import logging +import sys + +from ..avro import schema + +from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Decoder + + +class AsyncBinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + async def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = await self.reader.read(n) + if n > 0 and not input_bytes: + raise StopAsyncIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + async def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(await self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + async def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return await self.read_long() + + async def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + b = ord(await self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(await self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + async def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(await self.read(4))[0] + + async def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(await self.read(8))[0] + + async def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = await self.read_long() + assert (nbytes >= 0), nbytes + return await self.read(nbytes) + + async def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. 
+ """ + input_bytes = await self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + async def skip_boolean(self): + await self.skip(1) + + async def skip_int(self): + await self.skip_long() + + async def skip_long(self): + b = ord(await self.read(1)) + while (b & 0x80) != 0: + b = ord(await self.read(1)) + + async def skip_float(self): + await self.skip(4) + + async def skip_double(self): + await self.skip(8) + + async def skip_bytes(self): + await self.skip(await self.read_long()) + + async def skip_utf8(self): + await self.skip_bytes() + + async def skip(self, n): + await self.reader.seek(await self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class AsyncDatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema", and the schema expected by the + reader the "reader's schema". + """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + async def read(self, decoder): + return await self.read_data(self.writer_schema, decoder) + + async def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = await decoder.read_boolean() + elif writer_schema.type == 'string': + result = await decoder.read_utf8() + elif writer_schema.type == 'int': + result = await decoder.read_int() + elif writer_schema.type == 'long': + result = await decoder.read_long() + elif writer_schema.type == 'float': + result = await decoder.read_float() + elif writer_schema.type == 'double': + result = await decoder.read_double() + elif writer_schema.type == 'bytes': + result = await decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = await self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = await self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = await self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = await self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = await self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + async def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = await decoder.skip_boolean() + elif writer_schema.type == 'string': + result = await decoder.skip_utf8() + elif writer_schema.type == 'int': + result = await decoder.skip_int() + elif writer_schema.type == 'long': + result = await decoder.skip_long() + elif writer_schema.type == 
'float':
+            result = await decoder.skip_float()
+        elif writer_schema.type == 'double':
+            result = await decoder.skip_double()
+        elif writer_schema.type == 'bytes':
+            result = await decoder.skip_bytes()
+        elif writer_schema.type == 'fixed':
+            result = await self.skip_fixed(writer_schema, decoder)
+        elif writer_schema.type == 'enum':
+            result = await self.skip_enum(decoder)
+        elif writer_schema.type == 'array':
+            await self.skip_array(writer_schema, decoder)
+            result = None
+        elif writer_schema.type == 'map':
+            await self.skip_map(writer_schema, decoder)
+            result = None
+        elif writer_schema.type in ['union', 'error_union']:
+            result = await self.skip_union(writer_schema, decoder)
+        elif writer_schema.type in ['record', 'error', 'request']:
+            await self.skip_record(writer_schema, decoder)
+            result = None
+        else:
+            fail_msg = "Unknown schema type: %s" % writer_schema.type
+            raise schema.AvroException(fail_msg)
+        return result
+
+    @staticmethod
+    async def read_fixed(writer_schema, decoder):
+        """
+        Fixed instances are encoded using the number of bytes declared
+        in the schema.
+        """
+        return await decoder.read(writer_schema.size)
+
+    @staticmethod
+    async def skip_fixed(writer_schema, decoder):
+        return await decoder.skip(writer_schema.size)
+
+    @staticmethod
+    async def read_enum(writer_schema, decoder):
+        """
+        An enum is encoded by an int, representing the zero-based position
+        of the symbol in the schema.
+        """
+        # read data
+        index_of_symbol = await decoder.read_int()
+        if index_of_symbol >= len(writer_schema.symbols):
+            fail_msg = "Can't access enum index %d for enum with %d symbols" \
+                       % (index_of_symbol, len(writer_schema.symbols))
+            raise SchemaResolutionException(fail_msg, writer_schema)
+        read_symbol = writer_schema.symbols[index_of_symbol]
+        return read_symbol
+
+    @staticmethod
+    async def skip_enum(decoder):
+        return await decoder.skip_int()
+
+    async def read_array(self, writer_schema, decoder):
+        """
+        Arrays are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many array items.
+        A block with count zero indicates the end of the array.
+        Each item is encoded per the array's item schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+        The actual count in this case
+        is the absolute value of the count written.
+        """
+        read_items = []
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_count = -block_count
+                await decoder.read_long()
+            for _ in range(block_count):
+                read_items.append(await self.read_data(writer_schema.items, decoder))
+            block_count = await decoder.read_long()
+        return read_items
+
+    async def skip_array(self, writer_schema, decoder):
+        block_count = await decoder.read_long()
+        while block_count != 0:
+            if block_count < 0:
+                block_size = await decoder.read_long()
+                await decoder.skip(block_size)
+            else:
+                for _ in range(block_count):
+                    await self.skip_data(writer_schema.items, decoder)
+            block_count = await decoder.read_long()
+
+    async def read_map(self, writer_schema, decoder):
+        """
+        Maps are encoded as a series of blocks.
+
+        Each block consists of a long count value,
+        followed by that many key/value pairs.
+        A block with count zero indicates the end of the map.
+        Each item is encoded per the map's value schema.
+
+        If a block's count is negative,
+        then the count is followed immediately by a long block size,
+        indicating the number of bytes in the block.
+ The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + key = await decoder.read_utf8() + read_items[key] = await self.read_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + return read_items + + async def skip_map(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await decoder.skip_utf8() + await self.skip_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + + async def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. + """ + # schema resolution + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return await self.read_data(selected_writer_schema, decoder) + + async def skip_union(self, writer_schema, decoder): + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + async def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. 
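+
+        Note: this vendored reader decodes with the writer's schema only; the
+        resolution rules above describe the general Avro contract. As an
+        illustrative sketch, a record with an int field x and a string field y
+        is decoded by reading a zig-zag varint for x, then a length-prefixed
+        UTF-8 value for y, yielding {'x': ..., 'y': ...} in writer order.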
+ """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = await self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + async def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + await self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile.py new file mode 100644 index 00000000000..df06fe0cfe7 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile.py @@ -0,0 +1,266 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import io +import logging +import sys +import zlib + +from ..avro import avro_io +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Version of the container file: +VERSION = 1 + +if PY3: + MAGIC = b'Obj' + bytes([VERSION]) + MAGIC_SIZE = len(MAGIC) +else: + MAGIC = 'Obj' + chr(VERSION) + MAGIC_SIZE = len(MAGIC) + +# Size of the synchronization marker, in number of bytes: +SYNC_SIZE = 16 + +# Schema of the container header: +META_SCHEMA = schema.parse(""" +{ + "type": "record", "name": "org.apache.avro.file.Header", + "fields": [{ + "name": "magic", + "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} + }, { + "name": "meta", + "type": {"type": "map", "values": "bytes"} + }, { + "name": "sync", + "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} + }] +} +""" % { + 'magic_size': MAGIC_SIZE, + 'sync_size': SYNC_SIZE, +}) + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null', 'deflate']) + +# Metadata key associated to the schema: +SCHEMA_KEY = "avro.schema" + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class DataFileException(schema.AvroException): + """Problem reading or writing file object containers.""" + +# ------------------------------------------------------------------------------ + + +class DataFileReader(object): # pylint: disable=too-many-instance-attributes + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader, **kwargs): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io.BinaryDecoder(reader) + self._header_reader = kwargs.pop('header_reader', None) + self._header_decoder = None if self._header_reader is None else avro_io.BinaryDecoder(self._header_reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + + # In case self._reader only has partial content(without header). + # seek(0, 0) to make sure read the (partial)content from beginning. 
+        self._reader.seek(0, 0)
+
+        # read the header: magic, meta, sync
+        self._read_header()
+
+        # ensure codec is valid
+        avro_codec_raw = self.get_meta('avro.codec')
+        if avro_codec_raw is None:
+            self.codec = "null"
+        else:
+            self.codec = avro_codec_raw.decode('utf-8')
+        if self.codec not in VALID_CODECS:
+            raise DataFileException('Unknown codec: %s.' % self.codec)
+
+        # get ready to read
+        self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so there is no need to download from the beginning of the Avro file.
+        if hasattr(self._reader, 'object_position'):
+            self.reader.track_object_position()
+
+        self._cur_object_index = 0
+        # header_reader indicates the reader only has partial content. The reader has no
+        # block header, so we reuse the block count stored from the last read.
+        # Also, ChangeFeed only has codec==null, so using _raw_decoder is fine.
+        if self._header_reader is not None:
+            self._datum_decoder = self._raw_decoder
+
+        self.datum_reader.writer_schema = (
+            schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, data_type, value, traceback):
+        # Perform a close if there's no exception
+        if data_type is None:
+            self.close()
+
+    def __iter__(self):
+        return self
+
+    # read-only properties
+    @property
+    def reader(self):
+        return self._reader
+
+    @property
+    def raw_decoder(self):
+        return self._raw_decoder
+
+    @property
+    def datum_decoder(self):
+        return self._datum_decoder
+
+    @property
+    def datum_reader(self):
+        return self._datum_reader
+
+    @property
+    def sync_marker(self):
+        return self._sync_marker
+
+    @property
+    def meta(self):
+        return self._meta
+
+    # read/write properties
+    @property
+    def block_count(self):
+        return self._block_count
+
+    def get_meta(self, key):
+        """Reports the value of a given metadata key.
+
+        Args:
+          key: Metadata key (string) to report the value of.
+        Returns:
+          Value associated to the metadata key, as bytes.
+        """
+        return self._meta.get(key)
+
+    def _read_header(self):
+        header_reader = self._header_reader if self._header_reader else self._reader
+        header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+        # seek to the beginning of the file to get magic block
+        header_reader.seek(0, 0)
+
+        # read header into a dict
+        header = self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+        # check magic number
+        if header.get('magic') != MAGIC:
+            fail_msg = "Not an Avro data file: %s doesn't match %s." \
+                       % (header.get('magic'), MAGIC)
+            raise schema.AvroException(fail_msg)
+
+        # set metadata
+        self._meta = header['meta']
+
+        # set sync marker
+        self._sync_marker = header['sync']
+
+    def _read_block_header(self):
+        self._block_count = self.raw_decoder.read_long()
+        if self.codec == "null":
+            # Skip a long; we don't need to use the length.
+            self.raw_decoder.skip_long()
+            self._datum_decoder = self._raw_decoder
+        elif self.codec == 'deflate':
+            # Compressed data is stored as (length, data), which
+            # corresponds to how the "bytes" type is encoded.
+            data = self.raw_decoder.read_bytes()
+            # -15 is the log of the window size; negative indicates
+            # "raw" (no zlib headers) decompression. See zlib.h.
+            uncompressed = zlib.decompress(data, -15)
+            self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed))
+        else:
+            raise DataFileException("Unknown codec: %r" % self.codec)
+
+    def _skip_sync(self):
+        """
+        Read SYNC_SIZE bytes and compare them to the sync marker. If they do
+        not match, seek back to where we started; raises StopIteration once
+        the end of the file is reached.
+        """
+        proposed_sync_marker = self.reader.read(SYNC_SIZE)
+        if SYNC_SIZE > 0 and not proposed_sync_marker:
+            raise StopIteration
+        if proposed_sync_marker != self.sync_marker:
+            self.reader.seek(-SYNC_SIZE, 1)
+
+    def __next__(self):
+        """Return the next datum in the file."""
+        if self.block_count == 0:
+            self._skip_sync()
+
+            # object_position supports resuming a future read from the current position,
+            # so there is no need to download from the beginning of the Avro file.
+            if hasattr(self._reader, 'object_position'):
+                self.reader.track_object_position()
+                self._cur_object_index = 0
+
+            self._read_block_header()
+
+        datum = self.datum_reader.read(self.datum_decoder)
+        self._block_count -= 1
+        self._cur_object_index += 1
+
+        # object_position supports resuming a future read from the current position.
+        # This will track the index of the next item to be read.
+        # This will also track the offset before the next sync marker.
+        if hasattr(self._reader, 'object_position'):
+            if self.block_count == 0:
+                # the next event to be read is at index 0 in the new chunk of blocks
+                self.reader.track_object_position()
+                self.reader.set_object_index(0)
+            else:
+                self.reader.set_object_index(self._cur_object_index)
+
+        return datum
+
+    # PY2
+    def next(self):
+        return self.__next__()
+
+    def close(self):
+        """Close this reader."""
+        self.reader.close()
+
+
+if __name__ == '__main__':
+    raise Exception('Not a standalone module')
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile_async.py
new file mode 100644
index 00000000000..1e9d018228d
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/datafile_async.py
@@ -0,0 +1,215 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""Read/Write Avro File Object Containers."""
+
+import logging
+import sys
+
+from ..avro import avro_io_async
+from ..avro import schema
+from .datafile import DataFileException
+from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY
+
+
+PY3 = sys.version_info[0] == 3
+
+logger = logging.getLogger(__name__)
+
+# ------------------------------------------------------------------------------
+# Constants
+
+# Codecs supported by container files:
+VALID_CODECS = frozenset(['null'])
+
+
+class AsyncDataFileReader(object):  # pylint: disable=too-many-instance-attributes
+    """Read files written by DataFileWriter."""
+
+    def __init__(self, reader, datum_reader, **kwargs):
+        """Initializes a new data file reader.
+
+        Args:
+          reader: Open file to read from.
+          datum_reader: Avro datum reader.
+        """
+        self._reader = reader
+        self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader)
+        self._header_reader = kwargs.pop('header_reader', None)
+        self._header_decoder = None if self._header_reader is None else \
+            avro_io_async.AsyncBinaryDecoder(self._header_reader)
+        self._datum_decoder = None  # Maybe reset at every block.
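+        # Unlike the synchronous DataFileReader, the header cannot be parsed in
+        # __init__ (it requires awaiting reads), so codec, block count, meta and
+        # sync marker below start as placeholders until the awaitable init()
+        # populates them.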
+        self._datum_reader = datum_reader
+        self.codec = "null"
+        self._block_count = 0
+        self._cur_object_index = 0
+        self._meta = None
+        self._sync_marker = None
+
+    async def init(self):
+        # In case self._reader only has partial content (without the header),
+        # seek(0, 0) to make sure the (partial) content is read from the beginning.
+        await self._reader.seek(0, 0)
+
+        # read the header: magic, meta, sync
+        await self._read_header()
+
+        # ensure codec is valid
+        avro_codec_raw = self.get_meta('avro.codec')
+        if avro_codec_raw is None:
+            self.codec = "null"
+        else:
+            self.codec = avro_codec_raw.decode('utf-8')
+        if self.codec not in VALID_CODECS:
+            raise DataFileException('Unknown codec: %s.' % self.codec)
+
+        # get ready to read
+        self._block_count = 0
+
+        # object_position supports resuming a future read from the current position,
+        # so there is no need to download from the beginning of the Avro file.
+        if hasattr(self._reader, 'object_position'):
+            self.reader.track_object_position()
+
+        # header_reader indicates the reader only has partial content. The reader has no
+        # block header, so we reuse the block count stored from the last read.
+        # Also, ChangeFeed only has codec==null, so using _raw_decoder is fine.
+        if self._header_reader is not None:
+            self._datum_decoder = self._raw_decoder
+        self.datum_reader.writer_schema = (
+            schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8')))
+        return self
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, data_type, value, traceback):
+        # Perform a close if there's no exception
+        if data_type is None:
+            self.close()
+
+    def __aiter__(self):
+        return self
+
+    # read-only properties
+    @property
+    def reader(self):
+        return self._reader
+
+    @property
+    def raw_decoder(self):
+        return self._raw_decoder
+
+    @property
+    def datum_decoder(self):
+        return self._datum_decoder
+
+    @property
+    def datum_reader(self):
+        return self._datum_reader
+
+    @property
+    def sync_marker(self):
+        return self._sync_marker
+
+    @property
+    def meta(self):
+        return self._meta
+
+    # read/write properties
+    @property
+    def block_count(self):
+        return self._block_count
+
+    def get_meta(self, key):
+        """Reports the value of a given metadata key.
+
+        Args:
+          key: Metadata key (string) to report the value of.
+        Returns:
+          Value associated to the metadata key, as bytes.
+        """
+        return self._meta.get(key)
+
+    async def _read_header(self):
+        header_reader = self._header_reader if self._header_reader else self._reader
+        header_decoder = self._header_decoder if self._header_decoder else self._raw_decoder
+
+        # seek to the beginning of the file to get magic block
+        await header_reader.seek(0, 0)
+
+        # read header into a dict
+        header = await self.datum_reader.read_data(META_SCHEMA, header_decoder)
+
+        # check magic number
+        if header.get('magic') != MAGIC:
+            fail_msg = "Not an Avro data file: %s doesn't match %s." \
+                       % (header.get('magic'), MAGIC)
+            raise schema.AvroException(fail_msg)
+
+        # set metadata
+        self._meta = header['meta']
+
+        # set sync marker
+        self._sync_marker = header['sync']
+
+    async def _read_block_header(self):
+        self._block_count = await self.raw_decoder.read_long()
+        if self.codec == "null":
+            # Skip a long; we don't need to use the length.
+            await self.raw_decoder.skip_long()
+            self._datum_decoder = self._raw_decoder
+        else:
+            raise DataFileException("Unknown codec: %r" % self.codec)
+
+    async def _skip_sync(self):
+        """
+        Read SYNC_SIZE bytes and compare them to the sync marker. If they do
+        not match, seek back to where we started; raises StopAsyncIteration
+        once the end of the file is reached.
+ """ + proposed_sync_marker = await self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopAsyncIteration + if proposed_sync_marker != self.sync_marker: + await self.reader.seek(-SYNC_SIZE, 1) + + async def __anext__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + await self._skip_sync() + + # object_position is to support reading from current position in the future read, + # no need to downloading from the beginning of avro file with this attr. + if hasattr(self._reader, 'object_position'): + await self.reader.track_object_position() + self._cur_object_index = 0 + + await self._read_block_header() + + datum = await self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + self._cur_object_index += 1 + + # object_position is to support reading from current position in the future read, + # This will track the index of the next item to be read. + # This will also track the offset before the next sync marker. + if hasattr(self._reader, 'object_position'): + if self.block_count == 0: + # the next event to be read is at index 0 in the new chunk of blocks, + await self.reader.track_object_position() + await self.reader.set_object_index(0) + else: + await self.reader.set_object_index(self._cur_object_index) + + return datum + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/schema.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/schema.py new file mode 100644 index 00000000000..34fa5980a5f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/avro/schema.py @@ -0,0 +1,1221 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +"""Representation of Avro schemas. + +A schema may be one of: + - A record, mapping field names to field value data; + - An error, equivalent to a record; + - An enum, containing one of a small set of symbols; + - An array of values, all of the same schema; + - A map containing string/value pairs, each of a declared schema; + - A union of other schemas; + - A fixed sized binary object; + - A unicode string; + - A sequence of bytes; + - A 32-bit signed int; + - A 64-bit signed long; + - A 32-bit floating-point float; + - A 64-bit floating-point double; + - A boolean; + - Null. +""" + +import abc +import json +import logging +import re +import sys +from six import with_metaclass + +PY2 = sys.version_info[0] == 2 + +if PY2: + _str = unicode # pylint: disable=undefined-variable +else: + _str = str + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Log level more verbose than DEBUG=10, INFO=20, etc. 
+DEBUG_VERBOSE = 5 + +NULL = 'null' +BOOLEAN = 'boolean' +STRING = 'string' +BYTES = 'bytes' +INT = 'int' +LONG = 'long' +FLOAT = 'float' +DOUBLE = 'double' +FIXED = 'fixed' +ENUM = 'enum' +RECORD = 'record' +ERROR = 'error' +ARRAY = 'array' +MAP = 'map' +UNION = 'union' + +# Request and error unions are part of Avro protocols: +REQUEST = 'request' +ERROR_UNION = 'error_union' + +PRIMITIVE_TYPES = frozenset([ + NULL, + BOOLEAN, + STRING, + BYTES, + INT, + LONG, + FLOAT, + DOUBLE, +]) + +NAMED_TYPES = frozenset([ + FIXED, + ENUM, + RECORD, + ERROR, +]) + +VALID_TYPES = frozenset.union( + PRIMITIVE_TYPES, + NAMED_TYPES, + [ + ARRAY, + MAP, + UNION, + REQUEST, + ERROR_UNION, + ], +) + +SCHEMA_RESERVED_PROPS = frozenset([ + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +]) + +FIELD_RESERVED_PROPS = frozenset([ + 'default', + 'name', + 'doc', + 'order', + 'type', +]) + +VALID_FIELD_SORT_ORDERS = frozenset([ + 'ascending', + 'descending', + 'ignore', +]) + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class Error(Exception): + """Base class for errors in this module.""" + + +class AvroException(Error): + """Generic Avro schema error.""" + + +class SchemaParseException(AvroException): + """Error while parsing a JSON schema descriptor.""" + + +class Schema(with_metaclass(abc.ABCMeta, object)): + """Abstract base class for all Schema classes.""" + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object. + + Args: + data_type: Type of the schema to initialize. + other_props: Optional dictionary of additional properties. + """ + if data_type not in VALID_TYPES: + raise SchemaParseException('%r is not a valid Avro type.' % data_type) + + # All properties of this schema, as a map: property name -> property value + self._props = {} + + self._props['type'] = data_type + self._type = data_type + + if other_props: + self._props.update(other_props) + + @property + def namespace(self): + """Returns: the namespace this schema belongs to, if any, or None.""" + return self._props.get('namespace', None) + + @property + def type(self): + """Returns: the type of this schema.""" + return self._type + + @property + def doc(self): + """Returns: the documentation associated to this schema, if any, or None.""" + return self._props.get('doc', None) + + @property + def props(self): + """Reports all the properties of this schema. + + Includes all properties, reserved and non reserved. + JSON properties of this schema are directly generated from this dict. + + Returns: + A dictionary of properties associated to this schema. + """ + return self._props + + @property + def other_props(self): + """Returns: the dictionary of non-reserved properties.""" + return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) + + def __str__(self): + """Returns: the JSON representation of this schema.""" + return json.dumps(self.to_json(names=None)) + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. 
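+
+        (For example, a "fixed" schema already registered in names serializes
+        as just its name string rather than as a full definition.)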
+ """ + raise Exception('Cannot run abstract method.') + + +# ------------------------------------------------------------------------------ + + +_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') + +_RE_FULL_NAME = re.compile( + r'^' + r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace + r'([A-Za-z_][A-Za-z0-9_]*)' # name + r'$' +) + + +class Name(object): + """Representation of an Avro name.""" + + def __init__(self, name, namespace=None): + """Parses an Avro name. + + Args: + name: Avro name to parse (relative or absolute). + namespace: Optional explicit namespace if the name is relative. + """ + # Normalize: namespace is always defined as a string, possibly empty. + if namespace is None: + namespace = '' + + if '.' in name: + # name is absolute, namespace is ignored: + self._fullname = name + + match = _RE_FULL_NAME.match(self._fullname) + if match is None: + raise SchemaParseException( + 'Invalid absolute schema name: %r.' % self._fullname) + + self._name = match.group(1) + self._namespace = self._fullname[:-(len(self._name) + 1)] + + else: + # name is relative, combine with explicit namespace: + self._name = name + self._namespace = namespace + self._fullname = (self._name + if (not self._namespace) else + '%s.%s' % (self._namespace, self._name)) + + # Validate the fullname: + if _RE_FULL_NAME.match(self._fullname) is None: + raise SchemaParseException( + 'Invalid schema name %r inferred from name %r and namespace %r.' + % (self._fullname, self._name, self._namespace)) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + return self.fullname == other.fullname + + @property + def simple_name(self): + """Returns: the simple name part of this name.""" + return self._name + + @property + def namespace(self): + """Returns: this name's namespace, possible the empty string.""" + return self._namespace + + @property + def fullname(self): + """Returns: the full name.""" + return self._fullname + + +# ------------------------------------------------------------------------------ + + +class Names(object): + """Tracks Avro named schemas and default namespace during parsing.""" + + def __init__(self, default_namespace=None, names=None): + """Initializes a new name tracker. + + Args: + default_namespace: Optional default namespace. + names: Optional initial mapping of known named schemas. + """ + if names is None: + names = {} + self._names = names + self._default_namespace = default_namespace + + @property + def names(self): + """Returns: the mapping of known named schemas.""" + return self._names + + @property + def default_namespace(self): + """Returns: the default namespace, if any, or None.""" + return self._default_namespace + + def new_with_default_namespace(self, namespace): + """Creates a new name tracker from this tracker, but with a new default ns. + + Args: + namespace: New default namespace to use. + Returns: + New name tracker with the specified default namespace. + """ + return Names(names=self._names, default_namespace=namespace) + + def get_name(self, name, namespace=None): + """Resolves the Avro name according to this name tracker's state. + + Args: + name: Name to resolve (absolute or relative). + namespace: Optional explicit namespace. + Returns: + The specified name, resolved according to this tracker. + """ + if namespace is None: + namespace = self._default_namespace + return Name(name=name, namespace=namespace) + + def get_schema(self, name, namespace=None): + """Resolves an Avro schema by name. 
+
+        Args:
+          name: Name (relative or absolute) of the Avro schema to look up.
+          namespace: Optional explicit namespace.
+        Returns:
+          The schema with the specified name, if any, or None.
+        """
+        avro_name = self.get_name(name=name, namespace=namespace)
+        return self._names.get(avro_name.fullname, None)
+
+    def prune_namespace(self, properties):
+        """Returns a copy of the properties with the namespace removed,
+        if it matches this tracker's default namespace.
+        """
+        if self.default_namespace is None:
+            # There is no default namespace -- no change
+            return properties
+        if 'namespace' not in properties:
+            # The properties carry no namespace -- no change
+            return properties
+        if properties['namespace'] != self.default_namespace:
+            # The namespaces differ -- leave the properties alone
+            return properties
+        # The namespace matches the default and is redundant -- remove it.
+        prunable = properties.copy()
+        del prunable['namespace']
+        return prunable
+
+    def register(self, schema):
+        """Registers a new named schema in this tracker.
+
+        Args:
+          schema: Named Avro schema to register in this tracker.
+        """
+        if schema.fullname in VALID_TYPES:
+            raise SchemaParseException(
+                '%s is a reserved type name.' % schema.fullname)
+        if schema.fullname in self.names:
+            raise SchemaParseException(
+                'Avro name %r already exists.' % schema.fullname)
+
+        logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname)
+        self._names[schema.fullname] = schema
+
+
+# ------------------------------------------------------------------------------
+
+
+class NamedSchema(Schema):
+    """Abstract base class for named schemas.
+
+    Named schemas are enumerated in NAMED_TYPES.
+    """
+
+    def __init__(
+            self,
+            data_type,
+            name=None,
+            namespace=None,
+            names=None,
+            other_props=None,
+    ):
+        """Initializes a new named schema object.
+
+        Args:
+          data_type: Type of the named schema.
+          name: Name (absolute or relative) of the schema.
+          namespace: Optional explicit namespace if name is relative.
+          names: Tracker to resolve and register Avro names.
+          other_props: Optional map of additional properties of the schema.
+        """
+        assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type)
+        self._avro_name = names.get_name(name=name, namespace=namespace)
+
+        super(NamedSchema, self).__init__(data_type, other_props)
+
+        names.register(self)
+
+        self._props['name'] = self.name
+        if self.namespace:
+            self._props['namespace'] = self.namespace
+
+    @property
+    def avro_name(self):
+        """Returns: the Name object describing this schema's name."""
+        return self._avro_name
+
+    @property
+    def name(self):
+        return self._avro_name.simple_name
+
+    @property
+    def namespace(self):
+        return self._avro_name.namespace
+
+    @property
+    def fullname(self):
+        return self._avro_name.fullname
+
+    def name_ref(self, names):
+        """Reports this schema name relative to the specified name tracker.
+
+        Args:
+          names: Avro name tracker to relativize this schema name against.
+        Returns:
+          This schema name, relativized against the specified name tracker.
+        """
+        if self.namespace == names.default_namespace:
+            return self.name
+        return self.fullname
+
+    @abc.abstractmethod
+    def to_json(self, names):
+        """Converts the schema object into its AVRO specification representation.
+
+        Schema types that have names (records, enums, and fixed) must
+        be aware of not re-defining schemas that are already listed
+        in the parameter names.
+ """ + raise Exception('Cannot run abstract method.') + +# ------------------------------------------------------------------------------ + + +_NO_DEFAULT = object() + + +class Field(object): + """Representation of the schema of a field in a record.""" + + def __init__( + self, + data_type, + name, + index, + has_default, + default=_NO_DEFAULT, + order=None, + doc=None, + other_props=None + ): + """Initializes a new Field object. + + Args: + data_type: Avro schema of the field. + name: Name of the field. + index: 0-based position of the field. + has_default: + default: + order: + doc: + other_props: + """ + if (not isinstance(name, _str)) or (not name): + raise SchemaParseException('Invalid record field name: %r.' % name) + if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): + raise SchemaParseException('Invalid record field order: %r.' % order) + + # All properties of this record field: + self._props = {} + + self._has_default = has_default + if other_props: + self._props.update(other_props) + + self._index = index + self._type = self._props['type'] = data_type + self._name = self._props['name'] = name + + if has_default: + self._props['default'] = default + + if order is not None: + self._props['order'] = order + + if doc is not None: + self._props['doc'] = doc + + @property + def type(self): + """Returns: the schema of this field.""" + return self._type + + @property + def name(self): + """Returns: this field name.""" + return self._name + + @property + def index(self): + """Returns: the 0-based index of this field in the record.""" + return self._index + + @property + def default(self): + return self._props['default'] + + @property + def has_default(self): + return self._has_default + + @property + def order(self): + return self._props.get('order', None) + + @property + def doc(self): + return self._props.get('doc', None) + + @property + def props(self): + return self._props + + @property + def other_props(self): + return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['type'] = self.type.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Primitive Types + + +class PrimitiveSchema(Schema): + """Schema of a primitive Avro type. + + Valid primitive types are defined in PRIMITIVE_TYPES. + """ + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object for the specified primitive type. + + Args: + data_type: Type of the schema to construct. Must be primitive. + """ + if data_type not in PRIMITIVE_TYPES: + raise AvroException('%r is not a valid primitive type.' % data_type) + super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) + + @property + def name(self): + """Returns: the simple name of this schema.""" + # The name of a primitive type is the type itself. + return self.type + + @property + def fullname(self): + """Returns: the fully qualified name of this schema.""" + # The full name is the simple name for primitive schema. 
+ return self.name + + def to_json(self, names=None): + if len(self.props) == 1: + return self.fullname + return self.props + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (non-recursive) + + +class FixedSchema(NamedSchema): + def __init__( + self, + name, + namespace, + size, + names=None, + other_props=None, + ): + # Ensure valid ctor args + if not isinstance(size, int): + fail_msg = 'Fixed Schema requires a valid integer for size property.' + raise AvroException(fail_msg) + + super(FixedSchema, self).__init__( + data_type=FIXED, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + self._props['size'] = size + + @property + def size(self): + """Returns: the size of this fixed schema, in bytes.""" + return self._props['size'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ + + +class EnumSchema(NamedSchema): + def __init__( + self, + name, + namespace, + symbols, + names=None, + doc=None, + other_props=None, + ): + """Initializes a new enumeration schema object. + + Args: + name: Simple name of this enumeration. + namespace: Optional namespace. + symbols: Ordered list of symbols defined in this enumeration. + names: + doc: + other_props: + """ + symbols = tuple(symbols) + symbol_set = frozenset(symbols) + if (len(symbol_set) != len(symbols) + or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): + raise AvroException( + 'Invalid symbols for enum schema: %r.' % (symbols,)) + + super(EnumSchema, self).__init__( + data_type=ENUM, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + + self._props['symbols'] = symbols + if doc is not None: + self._props['doc'] = doc + + @property + def symbols(self): + """Returns: the symbols defined in this enum.""" + return self._props['symbols'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (recursive) + + +class ArraySchema(Schema): + """Schema of an array.""" + + def __init__(self, items, other_props=None): + """Initializes a new array schema object. + + Args: + items: Avro schema of the array items. 
+ other_props: + """ + super(ArraySchema, self).__init__( + data_type=ARRAY, + other_props=other_props, + ) + self._items_schema = items + self._props['items'] = items + + @property + def items(self): + """Returns: the schema of the items in this array.""" + return self._items_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + item_schema = self.items + to_dump['items'] = item_schema.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class MapSchema(Schema): + """Schema of a map.""" + + def __init__(self, values, other_props=None): + """Initializes a new map schema object. + + Args: + values: Avro schema of the map values. + other_props: + """ + super(MapSchema, self).__init__( + data_type=MAP, + other_props=other_props, + ) + self._values_schema = values + self._props['values'] = values + + @property + def values(self): + """Returns: the schema of the values in this map.""" + return self._values_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['values'] = self.values.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class UnionSchema(Schema): + """Schema of a union.""" + + def __init__(self, schemas): + """Initializes a new union schema object. + + Args: + schemas: Ordered collection of schema branches in the union. + """ + super(UnionSchema, self).__init__(data_type=UNION) + self._schemas = tuple(schemas) + + # Validate the schema branches: + + # All named schema names are unique: + named_branches = tuple( + filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) + unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) + if len(unique_names) != len(named_branches): + raise AvroException( + 'Invalid union branches with duplicate schema name:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + # Types are unique within unnamed schemas, and union is not allowed: + unnamed_branches = tuple( + filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) + unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) + if UNION in unique_types: + raise AvroException( + 'Invalid union branches contain other unions:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + if len(unique_types) != len(unnamed_branches): + raise AvroException( + 'Invalid union branches with duplicate type:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + @property + def schemas(self): + """Returns: the ordered list of schema branches in the union.""" + return self._schemas + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + to_dump.append(schema.to_json(names)) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class ErrorUnionSchema(UnionSchema): + """Schema representing the declared errors of a protocol message.""" + + def __init__(self, schemas): + """Initializes an error-union 
schema.
+
+        Args:
+          schemas: Collection of error schemas.
+        """
+        # Prepend "string" to handle system errors
+        schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)
+        super(ErrorUnionSchema, self).__init__(schemas=schemas)
+
+    def to_json(self, names=None):
+        if names is None:
+            names = Names()
+        to_dump = []
+        for schema in self.schemas:
+            # Don't print the system error schema
+            if schema.type == STRING:
+                continue
+            to_dump.append(schema.to_json(names))
+        return to_dump
+
+
+# ------------------------------------------------------------------------------
+
+
+class RecordSchema(NamedSchema):
+    """Schema of a record."""
+
+    @staticmethod
+    def _make_field(index, field_desc, names):
+        """Builds a single field schema from its JSON descriptor.
+
+        Args:
+          index: 0-based index of the field in the record.
+          field_desc: JSON descriptor of the record field.
+          names: Tracker of Avro named schemas.
+        Returns:
+          The field schema.
+        """
+        field_schema = schema_from_json_data(
+            json_data=field_desc['type'],
+            names=names,
+        )
+        other_props = (
+            dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS)))
+        return Field(
+            data_type=field_schema,
+            name=field_desc['name'],
+            index=index,
+            has_default=('default' in field_desc),
+            default=field_desc.get('default', _NO_DEFAULT),
+            order=field_desc.get('order', None),
+            doc=field_desc.get('doc', None),
+            other_props=other_props,
+        )
+
+    @staticmethod
+    def make_field_list(field_desc_list, names):
+        """Builds field schemas from a list of field JSON descriptors.
+
+        Guarantees field name uniqueness.
+
+        Args:
+          field_desc_list: collection of field JSON descriptors.
+          names: Avro schema tracker.
+        Yields:
+          Field schemas.
+        """
+        for index, field_desc in enumerate(field_desc_list):
+            yield RecordSchema._make_field(index, field_desc, names)
+
+    @staticmethod
+    def _make_field_map(fields):
+        """Builds the field map.
+
+        Guarantees field name uniqueness.
+
+        Args:
+          fields: iterable of field schemas.
+        Returns:
+          A map of field schemas, indexed by name.
+        """
+        field_map = {}
+        for field in fields:
+            if field.name in field_map:
+                raise SchemaParseException(
+                    'Duplicate record field name %r.' % field.name)
+            field_map[field.name] = field
+        return field_map
+
+    def __init__(
+            self,
+            name,
+            namespace,
+            fields=None,
+            make_fields=None,
+            names=None,
+            record_type=RECORD,
+            doc=None,
+            other_props=None
+    ):
+        """Initializes a new record schema object.
+
+        Args:
+          name: Name of the record (absolute or relative).
+          namespace: Optional namespace the record belongs to, if name is relative.
+          fields: collection of fields to add to this record.
+              Exactly one of fields or make_fields must be specified.
+          make_fields: function creating the fields that belong to the record.
+              The function signature is: make_fields(names) -> ordered field list.
+              Exactly one of fields or make_fields must be specified.
+          names: Tracker of Avro named schemas.
+          record_type: Type of the record: one of RECORD, ERROR or REQUEST.
+              Protocol requests are not named.
+          doc:
+          other_props:
+        """
+        if record_type == REQUEST:
+            # Protocol requests are not named:
+            super(RecordSchema, self).__init__(
+                data_type=REQUEST,
+                other_props=other_props,
+            )
+        elif record_type in [RECORD, ERROR]:
+            # Register this record name in the tracker:
+            super(RecordSchema, self).__init__(
+                data_type=record_type,
+                name=name,
+                namespace=namespace,
+                names=names,
+                other_props=other_props,
+            )
+        else:
+            raise SchemaParseException(
+                'Invalid record type: %r.'
% record_type) + + if record_type in [RECORD, ERROR]: + avro_name = names.get_name(name=name, namespace=namespace) + nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) + elif record_type == REQUEST: + # Protocol request has no name: no need to change default namespace: + nested_names = names + + if fields is None: + fields = make_fields(names=nested_names) + else: + assert make_fields is None + self._fields = tuple(fields) + + self._field_map = RecordSchema._make_field_map(self._fields) + + self._props['fields'] = fields + if doc is not None: + self._props['doc'] = doc + + @property + def fields(self): + """Returns: the field schemas, as an ordered tuple.""" + return self._fields + + @property + def field_map(self): + """Returns: a read-only map of the field schemas index by field names.""" + return self._field_map + + def to_json(self, names=None): + if names is None: + names = Names() + # Request records don't have names + if self.type == REQUEST: + return [f.to_json(names) for f in self.fields] + + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + + to_dump = names.prune_namespace(self.props.copy()) + to_dump['fields'] = [f.to_json(names) for f in self.fields] + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Module functions + + +def filter_keys_out(items, keys): + """Filters a collection of (key, value) items. + + Exclude any item whose key belongs to keys. + + Args: + items: Dictionary of items to filter the keys out of. + keys: Keys to filter out. + Yields: + Filtered items. + """ + for key, value in items.items(): + if key in keys: + continue + yield key, value + + +# ------------------------------------------------------------------------------ + + +def _schema_from_json_string(json_string, names): + if json_string in PRIMITIVE_TYPES: + return PrimitiveSchema(data_type=json_string) + + # Look for a known named schema: + schema = names.get_schema(name=json_string) + if schema is None: + raise SchemaParseException( + 'Unknown named schema %r, known names: %r.' 
+ % (json_string, sorted(names.names))) + return schema + + +def _schema_from_json_array(json_array, names): + def MakeSchema(desc): + return schema_from_json_data(json_data=desc, names=names) + + return UnionSchema(map(MakeSchema, json_array)) + + +def _schema_from_json_object(json_object, names): + data_type = json_object.get('type') + if data_type is None: + raise SchemaParseException( + 'Avro schema JSON descriptor has no "type" property: %r' % json_object) + + other_props = dict( + filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) + + if data_type in PRIMITIVE_TYPES: + # FIXME should not ignore other properties + result = PrimitiveSchema(data_type, other_props=other_props) + + elif data_type in NAMED_TYPES: + name = json_object.get('name') + namespace = json_object.get('namespace', names.default_namespace) + if data_type == FIXED: + size = json_object.get('size') + result = FixedSchema(name, namespace, size, names, other_props) + elif data_type == ENUM: + symbols = json_object.get('symbols') + doc = json_object.get('doc') + result = EnumSchema(name, namespace, symbols, names, doc, other_props) + + elif data_type in [RECORD, ERROR]: + field_desc_list = json_object.get('fields', ()) + + def MakeFields(names): + return tuple(RecordSchema.make_field_list(field_desc_list, names)) + + result = RecordSchema( + name=name, + namespace=namespace, + make_fields=MakeFields, + names=names, + record_type=data_type, + doc=json_object.get('doc'), + other_props=other_props, + ) + else: + raise Exception('Internal error: unknown type %r.' % data_type) + + elif data_type in VALID_TYPES: + # Unnamed, non-primitive Avro type: + + if data_type == ARRAY: + items_desc = json_object.get('items') + if items_desc is None: + raise SchemaParseException( + 'Invalid array schema descriptor with no "items" : %r.' + % json_object) + result = ArraySchema( + items=schema_from_json_data(items_desc, names), + other_props=other_props, + ) + + elif data_type == MAP: + values_desc = json_object.get('values') + if values_desc is None: + raise SchemaParseException( + 'Invalid map schema descriptor with no "values" : %r.' + % json_object) + result = MapSchema( + values=schema_from_json_data(values_desc, names=names), + other_props=other_props, + ) + + elif data_type == ERROR_UNION: + error_desc_list = json_object.get('declared_errors') + assert error_desc_list is not None + error_schemas = map( + lambda desc: schema_from_json_data(desc, names=names), + error_desc_list) + result = ErrorUnionSchema(schemas=error_schemas) + + else: + raise Exception('Internal error: unknown type %r.' % data_type) + else: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r' % json_object) + return result + + +# Parsers for the JSON data types: +_JSONDataParserTypeMap = { + _str: _schema_from_json_string, + list: _schema_from_json_array, + dict: _schema_from_json_object, +} + + +def schema_from_json_data(json_data, names=None): + """Builds an Avro Schema from its JSON descriptor. + + Args: + json_data: JSON data representing the descriptor of the Avro schema. + names: Optional tracker for Avro named schemas. + Returns: + The Avro schema parsed from the JSON descriptor. + Raises: + SchemaParseException: if the descriptor is invalid. + """ + if names is None: + names = Names() + + # Select the appropriate parser based on the JSON data type: + parser = _JSONDataParserTypeMap.get(type(json_data)) + if parser is None: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) + return parser(json_data, names=names) + + +# ------------------------------------------------------------------------------ + + +def parse(json_string): + """Constructs a Schema from its JSON descriptor in text form. + + Args: + json_string: String representation of the JSON descriptor of the schema. + Returns: + The parsed schema. + Raises: + SchemaParseException: on JSON parsing error, + or if the JSON descriptor is invalid. + """ + try: + json_data = json.loads(json_string) + except Exception as exn: + raise SchemaParseException( + 'Error parsing schema from JSON: %r. ' + 'Error message: %r.' + % (json_string, exn)) + + # Initialize the names object + names = Names() + + # construct the Avro Schema object + return schema_from_json_data(json_data, names) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client.py new file mode 100644 index 00000000000..4a4389b21d0 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client.py @@ -0,0 +1,462 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Any, + Dict, + Optional, + Tuple, + TYPE_CHECKING, + Union, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + AzureSasCredentialPolicy, + ContentDecodePolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + ProxyPolicy, + RedirectPolicy, + UserAgentPolicy, +) + +from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT, SERVICE_HOST_BASE +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + ExponentialRetry, + QueueMessagePolicy, + StorageBearerTokenCredentialPolicy, + StorageContentValidation, + StorageHeadersPolicy, + StorageHosts, + StorageLoggingPolicy, + StorageRequestHook, + StorageResponseHook, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: 
str + credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + **kwargs # type: Any + ): + # type: (...) -> None + self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY) + self._hosts = kwargs.get("_hosts") + self.scheme = parsed_url.scheme + + if service not in ["blob", "queue", "file-share", "dfs"]: + raise ValueError(f"Invalid service: {service}") + service_name = service.split('-')[0] + account = parsed_url.netloc.split(f".{service_name}.core.") + + self.account_name = account[0] if len(account) > 1 else None + if not self.account_name and parsed_url.netloc.startswith("localhost") \ + or parsed_url.netloc.startswith("127.0.0.1"): + self.account_name = parsed_url.path.strip("/") + + self.credential = _format_shared_key_credential(self.account_name, credential) + if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"): + raise ValueError("Token credential is only supported with HTTPS.") + + secondary_hostname = None + if hasattr(self.credential, "account_name"): + self.account_name = self.credential.account_name + secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}" + + if not self._hosts: + if len(account) > 1: + secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary") + if kwargs.get("secondary_hostname"): + secondary_hostname = kwargs["secondary_hostname"] + primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/') + self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname} + + self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs) + + def __enter__(self): + self._client.__enter__() + return self + + def __exit__(self, *args): + self._client.__exit__(*args) + + def close(self): + """ This method is to close the sockets opened by the client. + It need not be used when using with a context manager. + """ + self._client.close() + + @property + def url(self): + """The full endpoint URL to this entity, including SAS token if used. + + This could be either the primary endpoint, + or the secondary endpoint depending on the current :func:`location_mode`. + """ + return self._format_url(self._hosts[self._location_mode]) + + @property + def primary_endpoint(self): + """The full primary endpoint URL. + + :type: str + """ + return self._format_url(self._hosts[LocationMode.PRIMARY]) + + @property + def primary_hostname(self): + """The hostname of the primary endpoint. + + :type: str + """ + return self._hosts[LocationMode.PRIMARY] + + @property + def secondary_endpoint(self): + """The full secondary endpoint URL if configured. + + If not available a ValueError will be raised. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. + + :type: str + :raise ValueError: + """ + if not self._hosts[LocationMode.SECONDARY]: + raise ValueError("No secondary host configured.") + return self._format_url(self._hosts[LocationMode.SECONDARY]) + + @property + def secondary_hostname(self): + """The hostname of the secondary endpoint. + + If not available this will be None. To explicitly specify a secondary hostname, use the optional + `secondary_hostname` keyword argument on instantiation. 
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        # NOTE: the snapshot/share_snapshot arguments only gate whether the query
+        # parameter is emitted; the value written is read from the client's own
+        # self.snapshot attribute, which the concrete client is expected to define.
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={self.snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={self.snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = StorageBearerTokenCredentialPolicy(credential)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {credential}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
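+
+        Illustrative only: callers such as the blob container client's
+        delete_blobs() assemble one sub-request per blob and invoke
+        self._batch_send(*reqs, **kwargs), receiving an iterator of the
+        per-part responses.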
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url=( + f'{self.scheme}://{self.primary_hostname}/' + f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}" + f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}" + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
+ """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, str): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + if isinstance(credential, AzureNamedKeyCredential): + return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict((key.upper(), val) for key, val in conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} + except KeyError: + credential = conn_settings.get("SHAREDACCESSSIGNATURE") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary =( + f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://" + f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}" + ) + secondary = ( + f"{conn_settings['ACCOUNTNAME']}-secondary." + f"{service}.{conn_settings['ENDPOINTSUFFIX']}" + ) + except KeyError: + pass + + if not primary: + try: + primary = ( + f"https://{conn_settings['ACCOUNTNAME']}." 
+ f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}" + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + if service == "dfs": + primary = primary.replace(".blob.", ".dfs.") + if secondary: + secondary = secondary.replace(".blob.", ".dfs.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker=f"storage-{kwargs.pop('storage_sdk')}/{VERSION}", **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Datalake file uploads + config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, str): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client_async.py new file mode 100644 index 00000000000..e74efb5950c --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/base_client_async.py @@ -0,0 +1,188 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging + +from azure.core.credentials import AzureSasCredential +from azure.core.pipeline import AsyncPipeline +from azure.core.async_paging import AsyncList +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline.policies import ( + AsyncRedirectPolicy, + AzureSasCredentialPolicy, + ContentDecodePolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, +) +from azure.core.pipeline.transport import AsyncHttpTransport + +from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT +from .authentication import SharedKeyCredentialPolicy +from .base_client import create_configuration +from .policies import ( + QueueMessagePolicy, + StorageContentValidation, + StorageHeadersPolicy, + StorageHosts, + StorageRequestHook, +) +from .policies_async import AsyncStorageBearerTokenCredentialPolicy, AsyncStorageResponseHook + +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.pipeline import Pipeline + from azure.core.pipeline.transport import HttpRequest + from azure.core.configuration import Configuration +_LOGGER = logging.getLogger(__name__) + + +class AsyncStorageAccountHostsMixin(object): + + def __enter__(self): + raise TypeError("Async client only supports 'async with'.") + + def __exit__(self, *args): + pass + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def __aexit__(self, *args): + await self._client.__aexit__(*args) + + async def close(self): + """ This method closes the sockets opened by the client. + It need not be called when the client is used as a context manager. + """ + await self._client.close() + + def _create_pipeline(self, credential, **kwargs): + # type: (Any, **Any) -> Tuple[Configuration, Pipeline] + self._credential_policy = None + if hasattr(credential, 'get_token'): + self._credential_policy = AsyncStorageBearerTokenCredentialPolicy(credential) + elif isinstance(credential, SharedKeyCredentialPolicy): + self._credential_policy = credential + elif isinstance(credential, AzureSasCredential): + self._credential_policy = AzureSasCredentialPolicy(credential) + elif credential is not None: + raise TypeError(f"Unsupported credential: {credential}") + config = kwargs.get('_configuration') or create_configuration(**kwargs) + if kwargs.get('_pipeline'): + return config, kwargs['_pipeline'] + config.transport = kwargs.get('transport') # type: ignore + kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT) + kwargs.setdefault("read_timeout", READ_TIMEOUT) + if not config.transport: + try: + from azure.core.pipeline.transport import AioHttpTransport + except ImportError: + raise ImportError("Unable to create async transport.
Please check aiohttp is installed.") + config.transport = AioHttpTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.headers_policy, + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + StorageRequestHook(**kwargs), + self._credential_policy, + ContentDecodePolicy(response_encoding="utf-8"), + AsyncRedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), # type: ignore + config.retry_policy, + config.logging_policy, + AsyncStorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs), + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, AsyncPipeline(config.transport, policies=policies) + + async def _batch_send( + self, + *reqs, # type: HttpRequest + **kwargs + ): + """Given a series of requests, do a Storage batch call. + """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url=( + f'{self.scheme}://{self.primary_hostname}/' + f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}" + f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}" + ), + headers={ + 'x-ms-version': self.api_version + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/constants.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/constants.py new file mode 100644 index 00000000000..0b4b029a2d1 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/constants.py @@ -0,0 +1,19 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# -------------------------------------------------------------------------- + +from .._serialize import _SUPPORTED_API_VERSIONS + + +X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1] + +# Default socket timeouts, in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 60 + +DEFAULT_OAUTH_SCOPE = "/.default" +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/models.py new file mode 100644 index 00000000000..d997000e9a4 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/models.py @@ -0,0 +1,486 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes +from enum import Enum + +from azure.core import CaseInsensitiveEnumMeta + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + + # Generic storage values + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = 
"UnsupportedHttpVerb" + + # Blob values + APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" + BLOB_ALREADY_EXISTS = "BlobAlreadyExists" + BLOB_NOT_FOUND = "BlobNotFound" + BLOB_OVERWRITTEN = "BlobOverwritten" + BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" + BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" + BLOCK_LIST_TOO_LONG = "BlockListTooLong" + CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" + CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" + CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" + CONTAINER_BEING_DELETED = "ContainerBeingDeleted" + CONTAINER_DISABLED = "ContainerDisabled" + CONTAINER_NOT_FOUND = "ContainerNotFound" + CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" + COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" + COPY_ID_MISMATCH = "CopyIdMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" + INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" + INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead. + INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" + INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" + INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" + INVALID_BLOB_TIER = "InvalidBlobTier" + INVALID_BLOB_TYPE = "InvalidBlobType" + INVALID_BLOCK_ID = "InvalidBlockId" + INVALID_BLOCK_LIST = "InvalidBlockList" + INVALID_OPERATION = "InvalidOperation" + INVALID_PAGE_RANGE = "InvalidPageRange" + INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" + INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" + INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" + LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" + LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" + LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" + LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" + LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" + LEASE_ID_MISSING = "LeaseIdMissing" + LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" + LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" + LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" + LEASE_LOST = "LeaseLost" + LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" + LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" + LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" + MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" + NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" + OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" + PENDING_COPY_OPERATION = "PendingCopyOperation" + PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" + PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" + PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" + SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" + SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" + SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" + 
SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" + #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead. + SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" + SNAPSHOTS_PRESENT = "SnapshotsPresent" + SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" + SYSTEM_IN_USE = "SystemInUse" + TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" + UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" + BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" + BLOB_ARCHIVED = "BlobArchived" + BLOB_NOT_ARCHIVED = "BlobNotArchived" + + # Queue values + INVALID_MARKER = "InvalidMarker" + MESSAGE_NOT_FOUND = "MessageNotFound" + MESSAGE_TOO_LARGE = "MessageTooLarge" + POP_RECEIPT_MISMATCH = "PopReceiptMismatch" + QUEUE_ALREADY_EXISTS = "QueueAlreadyExists" + QUEUE_BEING_DELETED = "QueueBeingDeleted" + QUEUE_DISABLED = "QueueDisabled" + QUEUE_NOT_EMPTY = "QueueNotEmpty" + QUEUE_NOT_FOUND = "QueueNotFound" + + # File values + CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" + CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" + DELETE_PENDING = "DeletePending" + DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" + FILE_LOCK_CONFLICT = "FileLockConflict" + INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" + PARENT_NOT_FOUND = "ParentNotFound" + READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" + SHARE_ALREADY_EXISTS = "ShareAlreadyExists" + SHARE_BEING_DELETED = "ShareBeingDeleted" + SHARE_DISABLED = "ShareDisabled" + SHARE_NOT_FOUND = "ShareNotFound" + SHARING_VIOLATION = "SharingViolation" + SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" + SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" + SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" + SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" + CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero' + PATH_ALREADY_EXISTS = 'PathAlreadyExists' + INVALID_FLUSH_POSITION = 'InvalidFlushPosition' + INVALID_PROPERTY_NAME = 'InvalidPropertyName' + INVALID_SOURCE_URI = 'InvalidSourceUri' + UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion' + FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound' + PATH_NOT_FOUND = 'PathNotFound' + RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound' + SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound' + DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted' + FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists' + FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted' + INVALID_DESTINATION_PATH = 'InvalidDestinationPath' + INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath' + INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType' + LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken' + LEASE_NAME_MISMATCH = 'LeaseNameMismatch' + PATH_CONFLICT = 'PathConflict' + SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return 
not self.__eq__(other) + + def __str__(self): + return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')}) + + def has_key(self, k): + return k in self.__dict__ + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def keys(self): + return [k for k in self.__dict__ if not k.startswith('_')] + + def values(self): + return [v for k, v in self.__dict__.items() if not k.startswith('_')] + + def items(self): + return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')] + + def get(self, key, default=None): + if key in self.__dict__: + return self.__dict__[key] + return default + + +class LocationMode(object): + """ + Specifies the location the request should be sent to. This mode only applies + for RA-GRS accounts which allow secondary read access. All other account types + must use PRIMARY. + """ + + PRIMARY = 'primary' #: Requests should be sent to the primary location. + SECONDARY = 'secondary' #: Requests should be sent to the secondary location, if possible. + + +class ResourceTypes(object): + """ + Specifies the resource types that are accessible with the account SAS. + + :param bool service: + Access to service-level APIs (e.g., Get/Set Service Properties, + Get Service Stats, List Containers/Queues/Shares) + :param bool container: + Access to container-level APIs (e.g., Create/Delete Container, + Create/Delete Queue, Create/Delete Share, + List Blobs/Files and Directories) + :param bool object: + Access to object-level APIs for blobs, queue messages, and + files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.) + """ + + def __init__(self, service=False, container=False, object=False): # pylint: disable=redefined-builtin + self.service = service + self.container = container + self.object = object + self._str = (('s' if self.service else '') + + ('c' if self.container else '') + + ('o' if self.object else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create a ResourceTypes from a string. + + To specify service, container, or object you need only to + include the first letter of the word in the string. E.g. for service and container, + you would provide the string "sc". + + :param str string: Specify service, container, or object + in the string with the first letter of the word. + :return: A ResourceTypes object + :rtype: ~azure.storage.blob.ResourceTypes + """ + res_service = 's' in string + res_container = 'c' in string + res_object = 'o' in string + + parsed = cls(res_service, res_container, res_object) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class AccountSasPermissions(object): + """ + :class:`~ResourceTypes` class to be used with generate_account_sas + function and for the AccessPolicies used with set_*_acl. There are two types of + SAS which may be used to grant resource access. One is to grant access to a + specific resource (resource-specific). Another is to grant access to the + entire service for a specific account and allow certain operations based on + perms found here. + + :param bool read: + Valid for all signed resource types (Service, Container, and Object). + Permits read permissions to the specified resource type. + :param bool write: + Valid for all signed resource types (Service, Container, and Object). + Permits write permissions to the specified resource type. + :param bool delete: + Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool list: + Valid for Service and Container resource types only. + :param bool add: + Valid for the following Object resource types only: queue messages and append blobs. + :param bool create: + Valid for the following Object resource types only: blobs and files. + Users can create new blobs or files, but may not overwrite existing + blobs or files. + :param bool update: + Valid for the following Object resource types only: queue messages. + :param bool process: + Valid for the following Object resource type only: queue messages. + :keyword bool tag: + To enable set or get tags on the blobs in the container. + :keyword bool filter_by_tags: + To enable get blobs by tags, this should be used together with list permission. + :keyword bool set_immutability_policy: + To enable operations related to set/delete immutability policy. + To get immutability policy, you just need read permission. + :keyword bool permanent_delete: + To enable permanent delete of the blob. + Valid for the Blob Object resource type only. + """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.permanent_delete = kwargs.pop('permanent_delete', False) + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('y' if self.permanent_delete else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + + ('i' if self.set_immutability_policy else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word.
+ :return: An AccountSasPermissions object + :rtype: ~azure.storage.blob.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_permanent_delete = 'y' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + p_set_immutability_policy = 'i' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, + permanent_delete=p_permanent_delete) + + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.blob.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. + + The fields are saved as simple strings since the user does not have to interact with this object; + to generate an identity SAS, the user can simply pass it to the right API. + + :ivar str signed_oid: + Object ID of this token. + :ivar str signed_tid: + Tenant ID of the tenant that issued this token. + :ivar str signed_start: + The datetime this token becomes valid. + :ivar str signed_expiry: + The datetime this token expires. + :ivar str signed_service: + What service this key is valid for. + :ivar str signed_version: + The version identifier of the REST service that created this token. + :ivar str value: + The user delegation key.
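+ + A key is typically obtained from BlobServiceClient.get_user_delegation_key and then passed to a SAS-generation helper such as generate_blob_sas in place of an account key.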
+ """ + def __init__(self): + self.signed_oid = None + self.signed_tid = None + self.signed_start = None + self.signed_expiry = None + self.signed_service = None + self.signed_version = None + self.value = None diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/parser.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/parser.py new file mode 100644 index 00000000000..a4f9da94cc2 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/parser.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from datetime import datetime, timezone + +EPOCH_AS_FILETIME = 116444736000000000 # January 1, 1970 as MS filetime +HUNDREDS_OF_NANOSECONDS = 10000000 + +if sys.version_info < (3,): + def _str(value): + if isinstance(value, unicode): # pylint: disable=undefined-variable + return value.encode('utf-8') + + return str(value) +else: + _str = str + + +def _to_utc_datetime(value): + return value.strftime('%Y-%m-%dT%H:%M:%SZ') + +def _rfc_1123_to_datetime(rfc_1123: str) -> datetime: + """Converts an RFC 1123 date string to a UTC datetime. + """ + if not rfc_1123: + return None + + return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z") + +def _filetime_to_datetime(filetime: str) -> datetime: + """Converts an MS filetime string to a UTC datetime. "0" indicates None. + If parsing MS Filetime fails, tries RFC 1123 as backup. + """ + if not filetime: + return None + + # Try to convert to MS Filetime + try: + filetime = int(filetime) + if filetime == 0: + return None + + return datetime.fromtimestamp((filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc) + except ValueError: + pass + + # Try RFC 1123 as backup + return _rfc_1123_to_datetime(filetime) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies.py new file mode 100644 index 00000000000..e8338782d19 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies.py @@ -0,0 +1,660 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import re +import random +from time import time +from io import SEEK_SET, UnsupportedOperation +import logging +import uuid +from typing import Any, TYPE_CHECKING +from wsgiref.handlers import format_date_time +try: + from urllib.parse import ( + urlparse, + parse_qsl, + urlunparse, + urlencode, + ) +except ImportError: + from urllib import urlencode # type: ignore + from urlparse import ( # type: ignore + urlparse, + parse_qsl, + urlunparse, + ) + +from azure.core.pipeline.policies import ( + BearerTokenCredentialPolicy, + HeadersPolicy, + HTTPPolicy, + NetworkTraceLoggingPolicy, + RequestHistory, + SansIOHTTPPolicy, +) +from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError + +from .authentication import StorageHttpChallenge +from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE +from .models import LocationMode + +try: + _unicode_type = unicode # type: ignore +except NameError: + _unicode_type = str + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +def encode_base64(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def is_exhausted(settings): + """Are we out of retries?""" + retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) + retry_counts = list(filter(None, retry_counts)) + if not retry_counts: + return False + return min(retry_counts) < 0 + + +def retry_hook(settings, **kwargs): + if settings['hook']: + settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) + + +def is_retry(response, mode): # pylint: disable=too-many-return-statements + """Is this method/status code retryable? (Based on allowlists and control + variables such as the number of total retries to allow, whether to + respect the Retry-After header, whether this header is present, and + whether the returned status code is on the list of status codes to + be retried upon the presence of the aforementioned header) + """ + status = response.http_response.status_code + if 300 <= status < 500: + # An exception occurred, but in most cases it was expected. Examples could + # include a 409 Conflict or 412 Precondition Failed. + if status == 404 and mode == LocationMode.SECONDARY: + # Response code 404 should be retried if secondary was used. + return True + if status == 408: + # Response code 408 is a timeout and should be retried. + return True + return False + if status >= 500: + # Response codes above 500 with the exception of 501 Not Implemented and + # 505 Version Not Supported indicate a server issue and should be retried.
+ if status in [501, 505]: + return False + return True + # retry if invalid content md5 + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = response.http_request.headers.get('content-md5', None) or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError(f"Attempting to use undefined host location {use_location}") + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + def __init__(self, logging_enable=False, **kwargs): + self.logging_body = kwargs.pop("logging_body", False) + super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + self.logging_body = self.logging_body or options.pop("logging_body", False) + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + if self.logging_body: + _LOGGER.debug(str(http_request.body)) + else: + # We don't want to log the binary data of a file upload. + _LOGGER.debug("Hidden body, please use logging_body to show body") + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
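+ # Content-Disposition and Content-Type are sniffed below so that attachments, octet-streams and images are only summarised; the body itself is printed only when logging_body is set.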
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + resp_content_type = response.http_response.headers.get("content-type", "") + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif resp_content_type.endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif resp_content_type.startswith("image"): + _LOGGER.debug("Body contains image data.") + + if self.logging_body and resp_content_type.startswith("text"): + _LOGGER.debug(response.http_response.text()) + elif self.logging_body: + try: + _LOGGER.debug(response.http_response.body()) + except ValueError: + _LOGGER.debug("Body is streamable") + + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + # Values could be 0 + data_stream_total = request.context.get('data_stream_total') + if data_stream_total is None: + data_stream_total = request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') + if download_stream_current is None: + download_stream_current = request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') + if upload_stream_current is None: + upload_stream_current = request.context.options.pop('upload_stream_current', None) + + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + + will_retry = is_retry(response, request.context.options.get('mode')) + # Auth error could come from Bearer challenge, in which case this request will be made again + is_auth_error = response.http_response.status_code == 401 + should_update_counts = not (will_retry or is_auth_error) + + if should_update_counts and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif should_update_counts and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = 
upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A policy that calculates and validates the Content-MD5 of request and response bodies. + + On upload it attaches the computed MD5 header; on download it checks the value echoed by the service. + """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + # Since HTTP does not differentiate between no content and empty content, + # we have to perform a None check. + data = data or b"" + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError(( + f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', " + f"computed value is '{computed_md5}'."), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param dict settings: + The retry settings, containing the previous host location and location mode. + :param request: + The request to evaluate and possibly modify.
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+ settings['read'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + else: + # Incrementing because of a server error like a 500 in + # status_forcelist and the given method is in the allowlist + if response: + settings['status'] -= 1 + settings['history'].append(RequestHistory(request, http_response=response)) + + if not is_exhausted(settings): + if request.method not in ['PUT'] and settings['retry_secondary']: + self._set_next_host_location(settings, request) + + # rewind the request body if it is a stream + if request.body and hasattr(request.body, 'read'): + # if no position was saved, then retry would not work + if settings['body_position'] is None: + return False + try: + # attempt to rewind the body to the initial position + request.body.seek(settings['body_position'], SEEK_SET) + except (UnsupportedOperation, ValueError): + # if body is not seekable, then retry would not work + return False + settings['count'] += 1 + return True + return False + + def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(StorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_base^retry_count seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int retry_total: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled if RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying.
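+ + The interval is initial_backoff for the first retry, then initial_backoff + increment_base**retry_count, with up to random_jitter_range seconds of jitter applied in either direction.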
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(StorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential, **kwargs):
+        # type: (TokenCredential, **Any) -> None
+        super(StorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs)
+
+    def on_challenge(self, request, response):
+        # type: (PipelineRequest, PipelineResponse) -> bool
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies_async.py
new file mode 100644
index 00000000000..b0eae9f1c42
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/policies_async.py
@@ -0,0 +1,253 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .authentication import StorageHttpChallenge +from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + # Values could be 0 + data_stream_total = request.context.get('data_stream_total') + if data_stream_total is None: + data_stream_total = request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') + if download_stream_current is None: + download_stream_current = request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') + if upload_stream_current is None: + upload_stream_current = request.context.options.pop('upload_stream_current', None) + + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + # Auth error could come from Bearer challenge, in which case this request will be made again + is_auth_error = response.http_response.status_code == 401 + should_update_counts = not (will_retry or is_auth_error) + + if should_update_counts and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif should_update_counts and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + 
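+        # Store the callback on the context so that a retried attempt, which
+        # reuses this context rather than the already-consumed options, can
+        # find and invoke the same hook.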
+        request.context['response_callback'] = response_callback
+        return response
+
+
+class AsyncStorageRetryPolicy(StorageRetryPolicy):
+    """
+    The base class for Exponential and Linear retries containing shared code.
+    """
+
+    async def sleep(self, settings, transport):
+        backoff = self.get_backoff_time(settings)
+        if not backoff or backoff < 0:
+            return
+        await transport.sleep(backoff)
+
+    async def send(self, request):
+        retries_remaining = True
+        response = None
+        retry_settings = self.configure_retries(request)
+        while retries_remaining:
+            try:
+                response = await self.next.send(request)
+                if is_retry(response, retry_settings['mode']):
+                    retries_remaining = self.increment(
+                        retry_settings,
+                        request=request.http_request,
+                        response=response.http_response)
+                    if retries_remaining:
+                        await retry_hook(
+                            retry_settings,
+                            request=request.http_request,
+                            response=response.http_response,
+                            error=None)
+                        await self.sleep(retry_settings, request.context.transport)
+                        continue
+                break
+            except AzureError as err:
+                retries_remaining = self.increment(
+                    retry_settings, request=request.http_request, error=err)
+                if retries_remaining:
+                    await retry_hook(
+                        retry_settings,
+                        request=request.http_request,
+                        response=None,
+                        error=err)
+                    await self.sleep(retry_settings, request.context.transport)
+                    continue
+                raise err
+        if retry_settings['history']:
+            response.context['history'] = retry_settings['history']
+        response.http_response.location_mode = retry_settings['mode']
+        return response
+
+
+class ExponentialRetry(AsyncStorageRetryPolicy):
+    """Exponential retry."""
+
+    def __init__(self, initial_backoff=15, increment_base=3, retry_total=3,
+                 retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        '''
+        Constructs an Exponential retry object. The initial_backoff is used for
+        the first retry. Subsequent retries are retried after initial_backoff +
+        increment_base^retry_count seconds. For example, by default the first retry
+        occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the
+        third after (15+3^2) = 24 seconds.
+
+        :param int initial_backoff:
+            The initial backoff interval, in seconds, for the first retry.
+        :param int increment_base:
+            The base, in seconds, to increment the initial_backoff by after the
+            first retry.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        '''
+        self.initial_backoff = initial_backoff
+        self.increment_base = increment_base
+        self.random_jitter_range = random_jitter_range
+        super(ExponentialRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
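+            (The returned value already includes jitter drawn uniformly from
+            the configured random_jitter_range.)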
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count']))
+        random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0
+        random_range_end = backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class LinearRetry(AsyncStorageRetryPolicy):
+    """Linear retry."""
+
+    def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs):
+        """
+        Constructs a Linear retry object.
+
+        :param int backoff:
+            The backoff interval, in seconds, between retries.
+        :param int retry_total:
+            The maximum number of retry attempts.
+        :param bool retry_to_secondary:
+            Whether the request should be retried to secondary, if able. This should
+            only be enabled if RA-GRS accounts are used and potentially stale data
+            can be handled.
+        :param int random_jitter_range:
+            A number in seconds which indicates a range to jitter/randomize for the back-off interval.
+            For example, a random_jitter_range of 3 results in a back-off interval x that varies between x-3 and x+3.
+        """
+        self.backoff = backoff
+        self.random_jitter_range = random_jitter_range
+        super(LinearRetry, self).__init__(
+            retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs)
+
+    def get_backoff_time(self, settings):
+        """
+        Calculates how long to sleep before retrying.
+
+        :return:
+            An integer indicating how long to wait before retrying the request,
+            or None to indicate no retry should be performed.
+        :rtype: int or None
+        """
+        random_generator = random.Random()
+        # the backoff interval normally does not change, however there is the possibility
+        # that it was modified by accessing the property directly after initializing the object
+        random_range_start = self.backoff - self.random_jitter_range \
+            if self.backoff > self.random_jitter_range else 0
+        random_range_end = self.backoff + self.random_jitter_range
+        return random_generator.uniform(random_range_start, random_range_end)
+
+
+class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy):
+    """ Custom Bearer token credential policy for following Storage Bearer challenges """
+
+    def __init__(self, credential, **kwargs):
+        # type: (AsyncTokenCredential, **Any) -> None
+        super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs)
+
+    async def on_challenge(self, request, response):
+        # type: (PipelineRequest, PipelineResponse) -> bool
+        try:
+            auth_header = response.http_response.headers.get("WWW-Authenticate")
+            challenge = StorageHttpChallenge(auth_header)
+        except ValueError:
+            return False
+
+        scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE
+        await self.authorize_request(request, scope, tenant_id=challenge.tenant_id)
+
+        return True
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/request_handlers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/request_handlers.py
new file mode 100644
index 00000000000..923b7890fda
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/request_handlers.py
@@ -0,0 +1,278 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) + +import logging +from os import fstat +import stat +from io import (SEEK_END, SEEK_SET, UnsupportedOperation) + +import isodate + +from azure.core.exceptions import raise_with_traceback + + +_LOGGER = logging.getLogger(__name__) + +_REQUEST_DELIMITER_PREFIX = "batch_" +_HTTP1_1_IDENTIFIER = "HTTP/1.1" +_HTTP_LINE_ENDING = "\r\n" + + +def serialize_iso(attr): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: ValueError if format invalid. + """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + mode = fstat(fileno).st_mode + if stat.S_ISREG(mode) or stat.S_ISLNK(mode): + #st_size only meaningful if regular file or symlink, other types + # e.g. sockets may return misleading sizes like 0 + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. 
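+    # (This measures the bytes remaining from the current position: seek to
+    # the end, take the difference, then restore the original position.)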
+    try:
+        current_position = data.tell()
+        data.seek(0, SEEK_END)
+        length = data.tell() - current_position
+        data.seek(current_position, SEEK_SET)
+    except (AttributeError, OSError, UnsupportedOperation):
+        pass
+
+    return length
+
+
+def read_length(data):
+    try:
+        if hasattr(data, 'read'):
+            read_data = b''
+            for chunk in iter(lambda: data.read(4096), b""):
+                read_data += chunk
+            return len(read_data), read_data
+        if hasattr(data, '__iter__'):
+            read_data = b''
+            for chunk in data:
+                read_data += chunk
+            return len(read_data), read_data
+    except:  # pylint: disable=bare-except
+        pass
+    raise ValueError("Unable to calculate content length, please specify.")
+
+
+def validate_and_format_range_headers(
+        start_range, end_range, start_range_required=True,
+        end_range_required=True, check_content_md5=False, align_to_page=False):
+    # If end range is provided, start range must be provided
+    if (start_range_required or end_range is not None) and start_range is None:
+        raise ValueError("start_range value cannot be None.")
+    if end_range_required and end_range is None:
+        raise ValueError("end_range value cannot be None.")
+
+    # Page ranges must be 512 aligned
+    if align_to_page:
+        if start_range is not None and start_range % 512 != 0:
+            raise ValueError(f"Invalid page blob start_range: {start_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        if end_range is not None and end_range % 512 != 511:
+            raise ValueError(f"Invalid page blob end_range: {end_range}. "
+                             "The size must be aligned to a 512-byte boundary.")
+
+    # Format based on whether end_range is present
+    range_header = None
+    if end_range is not None:
+        range_header = f'bytes={start_range}-{end_range}'
+    elif start_range is not None:
+        range_header = f"bytes={start_range}-"
+
+    # Content MD5 can only be provided for a complete range less than 4MB in size
+    range_validation = None
+    if check_content_md5:
+        if start_range is None or end_range is None:
+            raise ValueError("Both start and end range required for MD5 content validation.")
+        if end_range - start_range > 4 * 1024 * 1024:
+            raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
+        range_validation = 'true'
+
+    return range_header, range_validation
+
+
+def add_metadata_headers(metadata=None):
+    # type: (Optional[Dict[str, str]]) -> Dict[str, str]
+    headers = {}
+    if metadata:
+        for key, value in metadata.items():
+            headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value
+    return headers
+
+
+def serialize_batch_body(requests, batch_id):
+    """
+    --<delimiter>
+    <subrequest>
+    --<delimiter>
+    <subrequest>    (repeated as needed)
+    --<delimiter>--
+
+    Serializes the requests in this batch to a single HTTP mixed/multipart body.
+
+    :param list[~azure.core.pipeline.transport.HttpRequest] requests:
+        a list of sub-requests for the batch request
+    :param str batch_id:
+        to be embedded in batch sub-request delimiter
+    :return: The body bytes for this batch.
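+    :rtype: bytes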
+ """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
+     <header key>: <header value> (repeated as necessary)
+     Content-Length: <value>
+     (newline if content length > 0)
+     <body> (if content length > 0)
+
+    Serializes an HTTP request.
+
+    :param ~azure.core.pipeline.transport.HttpRequest sub_request:
+        Request to serialize.
+    :return: The serialized sub-request in bytes
+    """
+
+    # put the sub-request's headers into a list for efficient str concatenation
+    sub_request_body = list()
+
+    # get headers for ease of manipulation; remove headers as they are used
+    headers = sub_request.headers
+
+    # append opening headers
+    sub_request_body.append("Content-Type: application/http")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-ID: ")
+    sub_request_body.append(headers.pop("Content-ID", ""))
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    sub_request_body.append("Content-Transfer-Encoding: binary")
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append HTTP verb and path and query and HTTP version
+    sub_request_body.append(sub_request.method)
+    sub_request_body.append(' ')
+    sub_request_body.append(sub_request.url)
+    sub_request_body.append(' ')
+    sub_request_body.append(_HTTP1_1_IDENTIFIER)
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append remaining headers (this will set the Content-Length, as it was set on `sub-request`)
+    for header_name, header_value in headers.items():
+        if header_value is not None:
+            sub_request_body.append(header_name)
+            sub_request_body.append(": ")
+            sub_request_body.append(header_value)
+            sub_request_body.append(_HTTP_LINE_ENDING)
+
+    # append blank line
+    sub_request_body.append(_HTTP_LINE_ENDING)
+
+    return ''.join(sub_request_body).encode()
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/response_handlers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/response_handlers.py
new file mode 100644
index 00000000000..aec26f43d2a
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/response_handlers.py
@@ -0,0 +1,203 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+from xml.etree.ElementTree import Element
+
+from azure.core.pipeline.policies import ContentDecodePolicy
+from azure.core.exceptions import (
+    HttpResponseError,
+    ResourceNotFoundError,
+    ResourceModifiedError,
+    ResourceExistsError,
+    ClientAuthenticationError,
+    DecodeError)
+
+from .parser import _to_utc_datetime
+from .models import StorageErrorCode, UserDelegationKey, get_enum_value
+
+
+if TYPE_CHECKING:
+    from datetime import datetime
+    from azure.core.exceptions import AzureError
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class PartialBatchErrorException(HttpResponseError):
+    """There is a partial failure in batch operations.
+
+    :param str message: The message of the exception.
+    :param response: Server response to be deserialized.
+    :param list parts: A list of the parts in multipart response.
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + try: + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + except AttributeError: + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.http_response.location_mode, deserialized + + +def return_raw_deserialized(response, *_): + return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME] + + +def process_storage_error(storage_error): # pylint:disable=too-many-statements + raise_error = HttpResponseError + serialized = False + if not storage_error.response: + raise storage_error + # If it is one of those three then it has been serialized prior by the generated layer. 
+ if isinstance(storage_error, (PartialBatchErrorException, + ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): + serialized = True + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + error_dict = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + try: + error_body = error_body or storage_error.response.reason + except AttributeError: + error_body = '' + # If it is an XML response + if isinstance(error_body, Element): + error_dict = { + child.tag.lower(): child.text + for child in error_body + } + # If it is a JSON response + elif isinstance(error_body, dict): + error_dict = error_body.get('error', {}) + elif not error_code: + _LOGGER.warning( + 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) + error_dict = {'message': str(error_body)} + + # If we extracted from a Json or XML response + if error_dict: + error_code = error_dict.get('code') + error_message = error_dict.get('message') + additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} + except DecodeError: + pass + + try: + # This check would be unnecessary if we have already serialized the error + if error_code and not serialized: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + # Error message should include all the error properties + try: + error_message += f"\nErrorCode:{error_code.value}" + except AttributeError: + error_message += f"\nErrorCode:{error_code}" + for name, info in additional_data.items(): + error_message += f"\n{name}:{info}" + + # No need to create an instance if it has already been serialized by the generated layer + if serialized: + storage_error.message = error_message + error = storage_error + else: + error = raise_error(message=error_message, response=storage_error.response) + # Ensure these properties are stored in the error instance as well (not just the error message) + error.error_code = error_code + error.additional_info = additional_data + # error.args is what's surfaced on the traceback - show error message in all cases + error.args = (error.message,) + try: + # `from None` prevents us from double printing the exception (suppresses generated layer error context) + exec("raise error from None") # pylint: 
disable=exec-used # nosec + except SyntaxError: + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/shared_access_signature.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/shared_access_signature.py new file mode 100644 index 00000000000..b4ebbdb7188 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/shared_access_signature.py @@ -0,0 +1,234 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + +# cspell:ignoreRegExp rsc. +# cspell:ignoreRegExp s..?id +class QueryStringConstants(object): + SIGNED_SIGNATURE = 'sig' + SIGNED_PERMISSION = 'sp' + SIGNED_START = 'st' + SIGNED_EXPIRY = 'se' + SIGNED_RESOURCE = 'sr' + SIGNED_IDENTIFIER = 'si' + SIGNED_IP = 'sip' + SIGNED_PROTOCOL = 'spr' + SIGNED_VERSION = 'sv' + SIGNED_CACHE_CONTROL = 'rscc' + SIGNED_CONTENT_DISPOSITION = 'rscd' + SIGNED_CONTENT_ENCODING = 'rsce' + SIGNED_CONTENT_LANGUAGE = 'rscl' + SIGNED_CONTENT_TYPE = 'rsct' + START_PK = 'spk' + START_RK = 'srk' + END_PK = 'epk' + END_RK = 'erk' + SIGNED_RESOURCE_TYPES = 'srt' + SIGNED_SERVICES = 'ss' + SIGNED_OID = 'skoid' + SIGNED_TID = 'sktid' + SIGNED_KEY_START = 'skt' + SIGNED_KEY_EXPIRY = 'ske' + SIGNED_KEY_SERVICE = 'sks' + SIGNED_KEY_VERSION = 'skv' + + # for blob only + SIGNED_ENCRYPTION_SCOPE = 'ses' + + # for ADLS + SIGNED_AUTHORIZED_OID = 'saoid' + SIGNED_UNAUTHORIZED_OID = 'suoid' + SIGNED_CORRELATION_ID = 'scid' + SIGNED_DIRECTORY_DEPTH = 'sdd' + + @staticmethod + def to_list(): + return [ + QueryStringConstants.SIGNED_SIGNATURE, + QueryStringConstants.SIGNED_PERMISSION, + QueryStringConstants.SIGNED_START, + QueryStringConstants.SIGNED_EXPIRY, + QueryStringConstants.SIGNED_RESOURCE, + QueryStringConstants.SIGNED_IDENTIFIER, + QueryStringConstants.SIGNED_IP, + QueryStringConstants.SIGNED_PROTOCOL, + QueryStringConstants.SIGNED_VERSION, + QueryStringConstants.SIGNED_CACHE_CONTROL, + QueryStringConstants.SIGNED_CONTENT_DISPOSITION, + QueryStringConstants.SIGNED_CONTENT_ENCODING, + QueryStringConstants.SIGNED_CONTENT_LANGUAGE, + QueryStringConstants.SIGNED_CONTENT_TYPE, + QueryStringConstants.START_PK, + QueryStringConstants.START_RK, + QueryStringConstants.END_PK, + QueryStringConstants.END_RK, + QueryStringConstants.SIGNED_RESOURCE_TYPES, + 
QueryStringConstants.SIGNED_SERVICES, + QueryStringConstants.SIGNED_OID, + QueryStringConstants.SIGNED_TID, + QueryStringConstants.SIGNED_KEY_START, + QueryStringConstants.SIGNED_KEY_EXPIRY, + QueryStringConstants.SIGNED_KEY_SERVICE, + QueryStringConstants.SIGNED_KEY_VERSION, + # for blob only + QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, + # for ADLS + QueryStringConstants.SIGNED_AUTHORIZED_OID, + QueryStringConstants.SIGNED_UNAUTHORIZED_OID, + QueryStringConstants.SIGNED_CORRELATION_ID, + QueryStringConstants.SIGNED_DIRECTORY_DEPTH, + ] + + +class SharedAccessSignature(object): + ''' + Provides a factory for creating account access + signature tokens with an account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. + ''' + + def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION): + ''' + :param str account_name: + The storage account name used to generate the shared access signatures. + :param str account_key: + The access key to generate the shares access signatures. + :param str x_ms_version: + The service version used to generate the shared access signatures. + ''' + self.account_name = account_name + self.account_key = account_key + self.x_ms_version = x_ms_version + + def generate_account(self, services, resource_types, permission, expiry, start=None, + ip=None, protocol=None, **kwargs): + ''' + Generates a shared access signature for the account. + Use the returned signature with the sas_token parameter of the service + or to create a new account object. + + :param ResourceTypes resource_types: + Specifies the resource types that are accessible with the account + SAS. You can combine values to provide access to more than one + resource type. + :param AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. 
See :class:`~azure.storage.common.models.Protocol` for possible values. + :keyword str encryption_scope: + Optional. If specified, this is the encryption scope to use when sending requests + authorized with this SAS URI. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_encryption_scope(**kwargs) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_encryption_scope(self, **kwargs): + self._add_query(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE, kwargs.pop('encryption_scope', None)) + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE)) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None]) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads.py 
b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads.py new file mode 100644 index 00000000000..df2fd7a900e --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads.py @@ -0,0 +1,607 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation +from itertools import islice +from math import ceil +from threading import Lock + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." + + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + progress_hook=None, + **kwargs): + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + progress_hook=progress_hook, + **kwargs) + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + progress_hook=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + progress_hook=progress_hook, + 
**kwargs) + + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__( + self, service, + total_size, + chunk_size, + stream, + parallel, + encryptor=None, + padder=None, + progress_hook=None, + **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + self.progress_hook = progress_hook + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, bytes): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. 
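+                # (A short read, e.g. from a socket-backed stream, loops again
+                # until a full chunk is buffered or EOF is reached.)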
+ if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + self.progress_hook(self.progress_total, self.total_size) + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
+ index = f'{chunk_offset:032d}' + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = f'BlockId{"%05d" % (index/self.chunk_size)}' + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = f"bytes={chunk_offset}-{chunk_end}" + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + try: + 
self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return f'bytes={chunk_offset}-{chunk_end}', response + + # TODO: Implement this method. + def _upload_substream_block(self, index, block_stream): + pass + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. + try: + # only the main thread runs this, so there's no need grabbing the lock + wrapped_stream.seek(0, SEEK_CUR) + except: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = ( + length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + ) + self._current_buffer_start = 0 + self._current_buffer_size = 0 + super(SubStream, self).__init__() + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, size=None): + if self.closed: # pylint: disable=using-constant-test + raise ValueError("Stream is closed.") + + if size is None: + size = self._length - self._position + + # adjust if out of bounds + if size + self._position >= self._length: + size = self._length - self._position + + # return fast + if size == 0 or self._buffer.closed: + return b"" + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(size) + bytes_read = len(read_buffer) + bytes_remaining = size - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_concurrency > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail 
fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. + if self._wrapped_stream.tell() != absolute_position: + self._wrapped_stream.seek(absolute_position, SEEK_SET) + + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence is SEEK_SET: + start_index = 0 + elif whence is SEEK_CUR: + start_index = self._position + elif whence is SEEK_END: + start_index = self._length + offset = -offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return self._position + + def write(self): + raise UnsupportedOperation + + def writelines(self): + raise UnsupportedOperation + + def writeable(self): + return False + + +class IterStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator, encoding="UTF-8"): + self.generator = generator + self.iterator = iter(generator) + self.leftover = b"" + self.encoding = encoding + + def __len__(self): + return self.generator.__len__() + + def __iter__(self): + return self.iterator + + def seekable(self): + return False + + def __next__(self): + return next(self.iterator) + + next = __next__ # Python 2 compatibility. + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is not seekable.") + + def read(self, size): + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = self.__next__() + if isinstance(chunk, str): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + # This means count < size and what's leftover will be returned in this call. 
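+            # (Exhausting the generator raises StopIteration; whatever bytes
+            # were gathered so far are returned below.)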
+        except StopIteration:
+            self.leftover = b""
+
+        if count >= size:
+            self.leftover = data[size:]
+
+        return data[:size]
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads_async.py
new file mode 100644
index 00000000000..dd436906146
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared/uploads_async.py
@@ -0,0 +1,461 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+
+import asyncio
+import inspect
+import threading
+from asyncio import Lock
+from io import UnsupportedOperation
+from itertools import islice
+from math import ceil
+from typing import AsyncGenerator, Union
+
+from . import encode_base64, url_quote
+from .request_handlers import get_length
+from .response_handlers import return_response_headers
+from .uploads import SubStream, IterStreamer  # pylint: disable=unused-import
+
+
+async def _async_parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = await pending.__anext__()
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopAsyncIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def _parallel_uploads(uploader, pending, running):
+    range_ids = []
+    while True:
+        # Wait for some upload to finish before adding a new one
+        done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED)
+        range_ids.extend([chunk.result() for chunk in done])
+        try:
+            for _ in range(0, len(done)):
+                next_chunk = next(pending)
+                running.add(asyncio.ensure_future(uploader(next_chunk)))
+        except StopIteration:
+            break
+
+    # Wait for the remaining uploads to finish
+    if running:
+        done, _running = await asyncio.wait(running)
+        range_ids.extend([chunk.result() for chunk in done])
+    return range_ids
+
+
+async def upload_data_chunks(
+        service=None,
+        uploader_class=None,
+        total_size=None,
+        chunk_size=None,
+        max_concurrency=None,
+        stream=None,
+        progress_hook=None,
+        **kwargs):
+
+    parallel = max_concurrency > 1
+    if parallel and 'modified_access_conditions' in kwargs:
+        # Access conditions do not work with parallelism
+        kwargs['modified_access_conditions'] = None
+
+    uploader = uploader_class(
+        service=service,
+        total_size=total_size,
+        chunk_size=chunk_size,
+        stream=stream,
+        parallel=parallel,
+        progress_hook=progress_hook,
+        **kwargs)
+
+    if parallel:
+        upload_tasks = uploader.get_chunk_streams()
+        running_futures = []
+        for _ in range(max_concurrency):
+            try:
+                chunk = await upload_tasks.__anext__()
+                running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk)))
+            except StopAsyncIteration:
+                break
+
+        range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures)
+    else:
+        range_ids = []
+        async for chunk in
uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + progress_hook=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + progress_hook=progress_hook, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__( + self, service, + total_size, + chunk_size, + stream, + parallel, + encryptor=None, + padder=None, + progress_hook=None, + **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + self.progress_hook = progress_hook + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + async def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if inspect.isawaitable(temp): + temp = await temp + if not isinstance(temp, bytes): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. 
+ if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + await self.progress_hook(self.progress_total, self.total_size) + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
+ index = f'{chunk_offset:032d}' + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = f'BlockId{"%05d" % (index/self.chunk_size)}' + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = f'bytes={chunk_offset}-{chunk_end}' + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + 
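
For orientation: every uploader above consumes (offset, bytes) pairs produced by `_ChunkUploader.get_chunk_streams`, which keeps reading from the source stream until it has buffered a full `chunk_size` worth of data (or the stream ends) before yielding. A minimal standalone sketch of that buffering loop follows — `iter_chunks` is an illustrative name, not part of the vendored SDK:

```python
# Illustrative sketch only (not from the vendored SDK): the fixed-size
# chunking idea behind _ChunkUploader.get_chunk_streams.
from io import BytesIO
from typing import Iterator, Tuple


def iter_chunks(stream, chunk_size: int) -> Iterator[Tuple[int, bytes]]:
    """Yield (offset, data) pairs of at most chunk_size bytes each."""
    index = 0
    while True:
        data = b""
        # Keep reading until a whole chunk is buffered or the stream is drained.
        while len(data) < chunk_size:
            piece = stream.read(chunk_size - len(data))
            if not piece:
                break
            data += piece
        if not data:
            return  # end of stream
        yield index, data
        index += len(data)


assert list(iter_chunks(BytesIO(b"abcdefgh"), 3)) == [(0, b"abc"), (3, b"def"), (6, b"gh")]
```
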
async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = f'bytes={chunk_offset}-{chunk_end}' + return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass + + +class AsyncIterStreamer(): + """ + File-like streaming object for AsyncGenerators. + """ + def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"): + self.iterator = generator.__aiter__() + self.leftover = b"" + self.encoding = encoding + + def seekable(self): + return False + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is not seekable.") + + async def read(self, size: int) -> bytes: + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = await self.iterator.__anext__() + if isinstance(chunk, str): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + # This means count < size and what's leftover will be returned in this call. + except StopAsyncIteration: + self.leftover = b"" + + if count >= size: + self.leftover = data[size:] + + return data[:size] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared_access_signature.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared_access_signature.py new file mode 100644 index 00000000000..f7e0e67a3b2 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_shared_access_signature.py @@ -0,0 +1,618 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, TYPE_CHECKING +) + +from ._shared import sign_string, url_quote +from ._shared.constants import X_MS_VERSION +from ._shared.models import Services, UserDelegationKey +from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, \ + QueryStringConstants + +if TYPE_CHECKING: + from datetime import datetime + from ..blob import ( + ResourceTypes, + AccountSasPermissions, + ContainerSasPermissions, + BlobSasPermissions + ) + + +class BlobQueryStringConstants(object): + SIGNED_TIMESTAMP = 'snapshot' + + +class BlobSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating blob and container access + signature tokens with a common account name and account key. 
Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key=None, user_delegation_key=None):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key to generate the shared access signatures.
+        :param ~azure.storage.blob.models.UserDelegationKey user_delegation_key:
+            Instead of an account key, the user could pass in a user delegation key.
+            A user delegation key can be obtained from the service by authenticating with an AAD identity;
+            this can be accomplished by calling get_user_delegation_key on any Blob service object.
+        '''
+        super(BlobSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+        self.user_delegation_key = user_delegation_key
+
+    def generate_blob(self, container_name, blob_name, snapshot=None, version_id=None, permission=None,
+                      expiry=None, start=None, policy_id=None, ip=None, protocol=None,
+                      cache_control=None, content_disposition=None,
+                      content_encoding=None, content_language=None,
+                      content_type=None, **kwargs):
+        '''
+        Generates a shared access signature for the blob or one of its snapshots.
+        Use the returned signature with the sas_token parameter of any BlobService.
+
+        :param str container_name:
+            Name of container.
+        :param str blob_name:
+            Name of blob.
+        :param str snapshot:
+            The snapshot parameter is an opaque DateTime value that,
+            when present, specifies the blob snapshot to grant permission.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered racwdxytmei.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: str or BlobSasPermissions
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_blob_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http.
See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + resource_path = container_name + '/' + blob_name + + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + + resource = 'bs' if snapshot else 'b' + resource = 'bv' if version_id else resource + resource = 'd' if kwargs.pop("is_directory", None) else resource + sas.add_resource(resource) + + sas.add_timestamp(snapshot or version_id) + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_encryption_scope(**kwargs) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, resource_path, + user_delegation_key=self.user_delegation_key) + + return sas.get_token() + + def generate_container(self, container_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None, **kwargs): + ''' + Generates a shared access signature for the container. + Use the returned signature with the sas_token parameter of any BlobService. + + :param str container_name: + Name of container. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered racwdxyltfmei. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_blob_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. 
+ If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _BlobSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('c') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_encryption_scope(**kwargs) + sas.add_info_for_hns_account(**kwargs) + sas.add_resource_signature(self.account_name, self.account_key, container_name, + user_delegation_key=self.user_delegation_key) + return sas.get_token() + + +class _BlobSharedAccessHelper(_SharedAccessHelper): + + def add_timestamp(self, timestamp): + self._add_query(BlobQueryStringConstants.SIGNED_TIMESTAMP, timestamp) + + def add_info_for_hns_account(self, **kwargs): + self._add_query(QueryStringConstants.SIGNED_DIRECTORY_DEPTH, kwargs.pop('sdd', None)) + self._add_query(QueryStringConstants.SIGNED_AUTHORIZED_OID, kwargs.pop('preauthorized_agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_UNAUTHORIZED_OID, kwargs.pop('agent_object_id', None)) + self._add_query(QueryStringConstants.SIGNED_CORRELATION_ID, kwargs.pop('correlation_id', None)) + + def get_value_to_append(self, query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + def add_resource_signature(self, account_name, account_key, path, user_delegation_key=None): + # pylint: disable = no-member + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/blob/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. 
+ string_to_sign = \ + (self.get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource) + + if user_delegation_key is not None: + self._add_query(QueryStringConstants.SIGNED_OID, user_delegation_key.signed_oid) + self._add_query(QueryStringConstants.SIGNED_TID, user_delegation_key.signed_tid) + self._add_query(QueryStringConstants.SIGNED_KEY_START, user_delegation_key.signed_start) + self._add_query(QueryStringConstants.SIGNED_KEY_EXPIRY, user_delegation_key.signed_expiry) + self._add_query(QueryStringConstants.SIGNED_KEY_SERVICE, user_delegation_key.signed_service) + self._add_query(QueryStringConstants.SIGNED_KEY_VERSION, user_delegation_key.signed_version) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_TID) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_START) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_EXPIRY) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_SERVICE) + + self.get_value_to_append(QueryStringConstants.SIGNED_KEY_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_AUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_UNAUTHORIZED_OID) + + self.get_value_to_append(QueryStringConstants.SIGNED_CORRELATION_ID)) + else: + string_to_sign += self.get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + string_to_sign += \ + (self.get_value_to_append(QueryStringConstants.SIGNED_IP) + + self.get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + self.get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + self.get_value_to_append(QueryStringConstants.SIGNED_RESOURCE) + + self.get_value_to_append(BlobQueryStringConstants.SIGNED_TIMESTAMP) + + self.get_value_to_append(QueryStringConstants.SIGNED_ENCRYPTION_SCOPE) + + self.get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + self.get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key if user_delegation_key is None else user_delegation_key.value, + string_to_sign)) + + def get_token(self): + # a conscious decision was made to exclude the timestamp in the generated token + # this is to avoid having two snapshot ids in the query parameters when the user appends the snapshot timestamp + exclude = [BlobQueryStringConstants.SIGNED_TIMESTAMP] + return '&'.join(['{0}={1}'.format(n, url_quote(v)) + for n, v in self.query_dict.items() if v is not None and n not in exclude]) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for the blob service. 
+ + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param resource_types: + Specifies the resource types that are accessible with the account SAS. + :type resource_types: str or ~azure.storage.blob.ResourceTypes + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.AccountSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str encryption_scope: + Specifies the encryption scope for a request made so that all write operations will be service encrypted. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication.py + :start-after: [START create_sas_token] + :end-before: [END create_sas_token] + :language: python + :dedent: 8 + :caption: Generating a shared access signature. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(blob=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_container_sas( + account_name, # type: str + container_name, # type: str + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[ContainerSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a container. 
+ + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered racwdxyltfmei. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.ContainerSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. 
+ :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str encryption_scope: + Specifies the encryption scope for a request made so that all write operations will be service encrypted. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. This can only be used when generating a SAS with delegation key. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 12 + :caption: Generating a sas token. + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + if isinstance(account_key, UserDelegationKey): + user_delegation_key = account_key + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_container( + container_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_blob_sas( + account_name, # type: str + container_name, # type: str + blob_name, # type: str + snapshot=None, # type: Optional[str] + account_key=None, # type: Optional[str] + user_delegation_key=None, # type: Optional[UserDelegationKey] + permission=None, # type: Optional[Union[BlobSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Any + """Generates a shared access signature for a blob. + + Use the returned signature with the credential parameter of any BlobServiceClient, + ContainerClient or BlobClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str container_name: + The name of the container. + :param str blob_name: + The name of the blob. + :param str snapshot: + An optional blob snapshot ID. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + Either `account_key` or `user_delegation_key` must be specified. + :param ~azure.storage.blob.UserDelegationKey user_delegation_key: + Instead of an account shared key, the user could pass in a user delegation key. + A user delegation key can be obtained from the service by authenticating with an AAD identity; + this can be accomplished by calling :func:`~azure.storage.blob.BlobServiceClient.get_user_delegation_key`. + When present, the SAS is signed with the user delegation key instead. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered racwdxytmei. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ~azure.storage.blob.BlobSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. 
+ Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.blob.ContainerClient.set_container_access_policy()`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str version_id: + An optional blob version ID. This parameter is only applicable for versioning-enabled + Storage accounts. Note that the 'versionid' query parameter is not included in the output + SAS. Therefore, please provide the 'version_id' parameter to any APIs when using the output + SAS to operate on a specific version. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str encryption_scope: + Specifies the encryption scope for a request made so that all write operations will be service encrypted. + :keyword str correlation_id: + The correlation id to correlate the storage audit logs with the audit logs used by the principal + generating and distributing the SAS. This can only be used when generating a SAS with delegation key. + :return: A Shared Access Signature (sas) token. 
+ :rtype: str + """ + if not user_delegation_key and not account_key: + raise ValueError("Either user_delegation_key or account_key must be provided.") + if isinstance(account_key, UserDelegationKey): + user_delegation_key = account_key + version_id = kwargs.pop('version_id', None) + if version_id and snapshot: + raise ValueError("snapshot and version_id cannot be set at the same time.") + if user_delegation_key: + sas = BlobSharedAccessSignature(account_name, user_delegation_key=user_delegation_key) + else: + sas = BlobSharedAccessSignature(account_name, account_key=account_key) + return sas.generate_blob( + container_name, + blob_name, + snapshot=snapshot, + version_id=version_id, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_upload_helpers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_upload_helpers.py new file mode 100644 index 00000000000..c13d43adf8c --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_upload_helpers.py @@ -0,0 +1,351 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from io import SEEK_SET, UnsupportedOperation +from typing import TypeVar, TYPE_CHECKING + +from azure.core.exceptions import ResourceExistsError, ResourceModifiedError, HttpResponseError + +from ._shared.response_handlers import process_storage_error, return_response_headers +from ._shared.models import StorageErrorCode +from ._shared.uploads import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader +) +from ._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) +from ._encryption import ( + GCMBlobEncryptionStream, + encrypt_blob, + get_adjusted_upload_size, + get_blob_encryptor_and_padder, + generate_blob_encryption_data, + _ENCRYPTION_PROTOCOL_V1, + _ENCRYPTION_PROTOCOL_V2 +) + +if TYPE_CHECKING: + BlobLeaseClient = TypeVar("BlobLeaseClient") + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = '{0} should be a seekable file-like/io.IOBase type stream object.' 
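
The three module-level helpers in _shared_access_signature.py above (`generate_account_sas`, `generate_container_sas`, `generate_blob_sas`) wrap these signature classes. A hedged usage sketch follows — it assumes the vendored package re-exports `generate_blob_sas` and `BlobSasPermissions` at package level, mirroring `azure.storage.blob`, and every account value is a placeholder:

```python
# Hypothetical usage of the SAS helpers above; all account values are
# placeholders, and the package-level re-export is assumed.
from datetime import datetime, timedelta, timezone

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02 import (
    BlobSasPermissions,
    generate_blob_sas,
)

sas_token = generate_blob_sas(
    account_name="examplestorageacct",    # placeholder
    container_name="artifacts",           # placeholder
    blob_name="nfd/image.vhd",            # placeholder
    account_key="<storage-account-key>",  # placeholder
    permission=BlobSasPermissions(read=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)
blob_url = f"https://examplestorageacct.blob.core.windows.net/artifacts/nfd/image.vhd?{sas_token}"
```
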
+ + +def _convert_mod_error(error): + message = error.message.replace( + "The condition specified using HTTP conditional header(s) is not met.", + "The specified blob already exists.") + message = message.replace("ConditionNotMet", "BlobAlreadyExists") + overwrite_error = ResourceExistsError( + message=message, + response=error.response, + error=error) + overwrite_error.error_code = StorageErrorCode.blob_already_exists + raise overwrite_error + + +def _any_conditions(modified_access_conditions=None, **kwargs): # pylint: disable=unused-argument + return any([ + modified_access_conditions.if_modified_since, + modified_access_conditions.if_unmodified_since, + modified_access_conditions.if_none_match, + modified_access_conditions.if_match + ]) + + +def upload_block_blob( # pylint: disable=too-many-locals, too-many-statements + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count = get_adjusted_upload_size(adjusted_count, encryption_options['version']) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + immutability_policy = kwargs.pop('immutability_policy', None) + immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time + immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode + legal_hold = kwargs.pop('legal_hold', None) + progress_hook = kwargs.pop('progress_hook', None) + + # Do single put if the size is smaller than or equal config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if not isinstance(data, bytes): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key'], encryption_options['version']) + headers['x-ms-meta-encryptiondata'] = encryption_data + + response = client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + + if progress_hook: + progress_hook(adjusted_count, adjusted_count) + + return response + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 'seekable') and not stream.seekable() or \ + not hasattr(stream, 'seek') or not hasattr(stream, 'tell') + + if use_original_upload_path: + total_size = length + encryptor, padder = None, None + if encryption_options and encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data( + encryption_options['key'], + 
encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_data
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                    encryptor, padder = get_blob_encryptor_and_padder(cek, iv, True)
+
+                # Adjust total_size for encryption V2
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V2:
+                    total_size = adjusted_count
+                    # V2 wraps the data stream with an encryption stream
+                    stream = GCMBlobEncryptionStream(cek, stream)
+
+            block_ids = upload_data_chunks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=total_size,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                encryptor=encryptor,
+                padder=padder,
+                headers=headers,
+                **kwargs
+            )
+        else:
+            block_ids = upload_substream_blocks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs
+            )
+
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        block_lookup.latest = block_ids
+        return client.commit_block_list(
+            block_lookup,
+            blob_http_headers=blob_headers,
+            cls=return_response_headers,
+            validate_content=validate_content,
+            headers=headers,
+            tier=tier.value if tier else None,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            **kwargs)
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
+
+def upload_page_blob(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        headers=None,
+        validate_content=None,
+        max_concurrency=None,
+        blob_settings=None,
+        encryption_options=None,
+        **kwargs):
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        if length is None or length < 0:
+            raise ValueError("A content length must be specified for a Page Blob.")
+        if length % 512 != 0:
+            raise ValueError("Invalid page blob size: {0}. 
" + "The size must be aligned to a 512-byte boundary.".format(length)) + tier = None + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + tier = premium_page_blob_tier.value + except AttributeError: + tier = premium_page_blob_tier + + if encryption_options and encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data( + encryption_options['key'], + encryption_options['version']) + headers['x-ms-meta-encryptiondata'] = encryption_data + + blob_tags_string = kwargs.pop('blob_tags_string', None) + progress_hook = kwargs.pop('progress_hook', None) + + response = client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + tier=tier, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + if encryption_options and encryption_options.get('key'): + if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1: + encryptor, padder = get_blob_encryptor_and_padder(cek, iv, False) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + progress_hook=progress_hook, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + progress_hook = kwargs.pop('progress_hook', None) + + try: + if overwrite: + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + progress_hook=progress_hook, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + 
max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + progress_hook=progress_hook, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_version.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_version.py new file mode 100644 index 00000000000..de7054ecd7d --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +VERSION = "12.16.0" diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/__init__.py new file mode 100644 index 00000000000..e8286fe217b --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/__init__.py @@ -0,0 +1,147 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import os + +from ._list_blobs_helper import BlobPrefix +from .._models import BlobType +from .._shared.policies_async import ExponentialRetry, LinearRetry +from ._blob_client_async import BlobClient +from ._container_client_async import ContainerClient +from ._blob_service_client_async import BlobServiceClient +from ._lease_async import BlobLeaseClient +from ._download_async import StorageStreamDownloader + + +async def upload_blob_to_url( + blob_url, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + **kwargs): + # type: (...) -> dict[str, Any] + """Upload data to a given URL + + The data will be uploaded as a block blob. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param data: + The data to upload. This can be bytes, text, an iterable or a file-like object. + :type data: bytes or str or Iterable + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword bool overwrite: + Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob_to_url will overwrite any existing data. 
If set to False, the + operation will fail with a ResourceExistsError. + :keyword int max_concurrency: + The number of parallel connections with which to upload. + :keyword int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword dict(str,str) metadata: + Name-value pairs associated with the blob as metadata. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword str encoding: + Encoding to use if text is supplied as input. Defaults to UTF-8. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict(str, Any) + """ + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + return await client.upload_blob(data=data, blob_type=BlobType.BlockBlob, **kwargs) + + +async def _download_to_stream(client, handle, **kwargs): + """Download data to specified open file-handle.""" + stream = await client.download_blob(**kwargs) + await stream.readinto(handle) + + +async def download_blob_from_url( + blob_url, # type: str + output, # type: str + credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + **kwargs): + # type: (...) -> None + """Download the contents of a blob to a local file or stream. + + :param str blob_url: + The full URI to the blob. This can also include a SAS token. + :param output: + Where the data should be downloaded to. This could be either a file path to write to, + or an open IO handle to write to. + :type output: str or writable stream + :param credential: + The credentials with which to authenticate. This is optional if the + blob URL already has a SAS token or the blob is public. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword bool overwrite: + Whether the local file should be overwritten if it already exists. The default value is + `False` - in which case a ValueError will be raised if the file already exists. If set to + `True`, an attempt will be made to write to the existing file. If a stream handle is passed + in, this value is ignored. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :keyword int length: + Number of bytes to read from the stream.
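A minimal usage sketch for the upload_blob_to_url helper defined above, assuming `sas_url` is a hypothetical blob URL that already carries a SAS token with write permission:

import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import (
    upload_blob_to_url,
)


async def main():
    sas_url = "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"
    # Uploaded as a block blob; overwrite=True replaces any existing content.
    result = await upload_blob_to_url(sas_url, b"hello", overwrite=True)
    print(result.get("etag"))


if __name__ == "__main__":
    asyncio.run(main())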
This is optional, but + should be supplied for optimal performance. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :rtype: None + """ + overwrite = kwargs.pop('overwrite', False) + async with BlobClient.from_blob_url(blob_url, credential=credential) as client: + if hasattr(output, 'write'): + await _download_to_stream(client, output, **kwargs) + else: + if not overwrite and os.path.isfile(output): + raise ValueError("The file '{}' already exists.".format(output)) + with open(output, 'wb') as file_handle: + await _download_to_stream(client, file_handle, **kwargs) + + +__all__ = [ + 'upload_blob_to_url', + 'download_blob_from_url', + 'BlobServiceClient', + 'BlobPrefix', + 'ContainerClient', + 'BlobClient', + 'BlobLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'StorageStreamDownloader' +] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_client_async.py new file mode 100644 index 00000000000..4266d36f543 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_client_async.py @@ -0,0 +1,2914 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method + +import warnings +from functools import partial +from typing import ( # pylint: disable=unused-import + Any, AnyStr, AsyncIterable, Dict, IO, Iterable, List, Optional, overload, Tuple, Union, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._generated.aio import AzureBlobStorage +from .._generated.models import CpkInfo +from .._blob_client import BlobClient as BlobClientBase +from .._deserialize import ( + deserialize_blob_properties, + deserialize_pipeline_response_into_cls, + get_page_ranges_result, + parse_tags +) +from .._encryption import StorageEncryptionMixin +from .._models import BlobType, BlobBlock, BlobProperties, PageRange +from .._serialize import get_modify_conditions, get_api_version, get_access_conditions +from ._download_async import StorageStreamDownloader +from ._lease_async import BlobLeaseClient +from ._models import PageRangePaged +from ._upload_helpers import ( + upload_block_blob, + upload_append_blob, + upload_page_blob +) + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + ContentSettings, + ImmutabilityPolicy, + PremiumPageBlobTier, + StandardBlobTier, + SequenceNumberAction + ) + + +class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase, StorageEncryptionMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific blob, although that blob may not yet exist. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the blob, + use the :func:`from_blob_url` classmethod. + :param container_name: The container name for the blob. + :type container_name: str + :param blob_name: The name of the blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob_name: str + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. 
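As a sketch of the two construction paths described in this class docstring, using placeholder account details:

from azext_aosm.vendored_sdks.azure_storagev2.blob.v2022_11_02.aio import BlobClient

# Explicit account URL plus container/blob names and an account key.
blob = BlobClient(
    account_url="https://myaccount.blob.core.windows.net",
    container_name="mycontainer",
    blob_name="myblob",
    credential="<account-key>",
)

# Or derive everything from a full blob URL (optionally carrying a SAS token).
blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/myblob?<sas>"
)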
+ :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.2.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. + Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be + uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, + the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. + :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient + algorithm when uploading a block blob. Defaults to 4*1024*1024+1. + :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False. + :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB. + :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call, + the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB. + :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024, + or 4MB. + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client] + :end-before: [END create_blob_client] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a URL to a public blob (no auth needed). + + .. literalinclude:: ../samples/blob_samples_authentication_async.py + :start-after: [START create_blob_client_sas_url] + :end-before: [END create_blob_client_sas_url] + :language: python + :dedent: 8 + :caption: Creating the BlobClient from a SAS URL to a blob. + """ + def __init__( + self, account_url: str, + container_name: str, + blob_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> None: + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobClient, self).__init__( + account_url, + container_name=container_name, + blob_name=blob_name, + snapshot=snapshot, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._configure_encryption(kwargs) + + @distributed_trace_async + async def get_account_information(self, **kwargs): # type: ignore + # type: (Optional[int]) -> Dict[str, str] + """Gets information related to the storage account in which the blob resides. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). 
+ :rtype: dict(str, str) + """ + try: + return await self._client.blob.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Dict[str, Any] + """ + Creates a new Block Blob where the content of the blob is read from a given URL. + The content of an existing blob is overwritten with the new blob. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. + :keyword bool include_source_blob_properties: + Indicates if properties from the source blob should be copied. Defaults to True. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :paramtype tags: dict(str, str) + :keyword bytearray source_content_md5: + Specify the md5 that is used to verify the integrity of the source bytes. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. 
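A short sketch of driving the upload_blob_from_url method documented here, assuming `client` is an instance of the async BlobClient above pointing at the destination blob:

async def copy_from_public_source(client):
    # Server-side copy of a publicly readable source into the destination blob.
    return await client.upload_blob_from_url(
        "https://otheraccount.blob.core.windows.net/mycontainer/myblob",
        overwrite=True,
        include_source_blob_properties=True,
    )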
+ :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_blob_from_url_options( + source_url=self._encode_source_url(source_url), + **kwargs) + try: + return await self._client.block_blob.put_blob_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_blob( + self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]], + blob_type: Union[str, BlobType] = BlobType.BlockBlob, + length: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs + ) -> Dict[str, Any]: + """Creates a new blob from a data source with automatic chunking. + + :param data: The blob data to upload. + :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be + either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob. 
+ :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data. + If True, upload_blob will overwrite the existing data. If set to False, the + operation will fail with ResourceExistsError. The exception to the above is with Append + blob types: if set to False and the data already exists, an error will not be raised + and the data will be appended to the existing blob. If set overwrite=True, then the existing + append blob will be deleted, and a new one created. Defaults to False. + :keyword ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + If specified, upload_blob only succeeds if the + blob's lease is active and matches this ID. + Required if the blob has an active lease. + :paramtype: ~azure.storage.blob.aio.BlobLeaseClient + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. 
versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + Currently this parameter of upload_blob() API is for BlockBlob only. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int max_concurrency: + Maximum number of parallel connections to use when the blob size exceeds + 64MB. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword progress_hook: + An async callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transferred + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START upload_a_blob] + :end-before: [END upload_a_blob] + :language: python + :dedent: 16 + :caption: Upload a blob to the container.
+ """ + options = self._upload_blob_options( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + **kwargs) + if blob_type == BlobType.BlockBlob: + return await upload_block_blob(**options) + if blob_type == BlobType.PageBlob: + return await upload_page_blob(**options) + return await upload_append_blob(**options) + + @overload + async def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: str, + **kwargs) -> StorageStreamDownloader[str]: + ... + + @overload + async def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: None = None, + **kwargs) -> StorageStreamDownloader[bytes]: + ... + + @distributed_trace_async + async def download_blob( + self, offset: int = None, + length: int = None, + *, + encoding: Optional[str] = None, + **kwargs) -> StorageStreamDownloader: + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. If specified, download_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. 
+ :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int max_concurrency: + The number of parallel connections with which to download. + :keyword str encoding: + Encoding to decode the downloaded bytes. Default is None, i.e. no decoding. + :keyword progress_hook: + An async callback to track the progress of a long running download. The signature is + function(current: int, total: int) where current is the number of bytes transferred + so far, and total is the total size of the download. + :paramtype progress_hook: Callable[[int, int], Awaitable[None]] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. This method may make multiple calls to the service and + the timeout will apply to each call individually. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.blob.aio.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START download_a_blob] + :end-before: [END download_a_blob] + :language: python + :dedent: 16 + :caption: Download a blob. + """ + options = self._download_blob_options( + offset=offset, + length=length, + encoding=encoding, + **kwargs) + downloader = StorageStreamDownloader(**options) + await downloader._setup() # pylint: disable=protected-access + return downloader + + @distributed_trace_async + async def delete_blob(self, delete_snapshots=None, **kwargs): + # type: (str, Any) -> None + """Marks the specified blob for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob() + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob + and retains the blob for a specified number of days. + After the specified number of days, the blob's data is removed from the service during garbage collection. + A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the `include=['deleted']` + option. A soft-deleted blob can be restored using the :func:`undelete` operation. + + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blob's snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'.
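A matching sketch for download_blob as documented above; with an encoding set, readall() returns decoded text rather than bytes:

async def read_blob_text(client):
    # download_blob returns a StorageStreamDownloader; readall() buffers everything.
    downloader = await client.download_blob(encoding="utf-8")
    return await downloader.readall()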
+ + :keyword lease: + Required if the blob has an active lease. If specified, delete_blob only + succeeds if the blob's lease is active and matches this ID. Value can be a + BlobLeaseClient object or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START delete_blob] + :end-before: [END delete_blob] + :language: python + :dedent: 16 + :caption: Delete a blob. + """ + options = self._delete_blob_options(delete_snapshots=delete_snapshots, **kwargs) + try: + await self._client.blob.delete(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def undelete_blob(self, **kwargs): + # type: (Any) -> None + """Restores soft-deleted blobs or snapshots. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + If blob versioning is enabled, the base blob cannot be restored using this + method. Instead use :func:`start_copy_from_url` with the URL of the blob version + you wish to promote to the current version. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START undelete_blob] + :end-before: [END undelete_blob] + :language: python + :dedent: 12 + :caption: Undeleting a blob. 
+ """ + try: + await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a blob exists with the defined parameters, and returns + False otherwise. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to check if it exists. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: boolean + """ + try: + await self._client.blob.get_properties( + snapshot=self.snapshot, + **kwargs) + return True + # Encrypted with CPK + except ResourceExistsError: + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace_async + async def get_blob_properties(self, **kwargs): + # type: (Any) -> BlobProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the blob. It does not return the content of the blob. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to get properties. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: BlobProperties + :rtype: ~azure.storage.blob.BlobProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START get_blob_properties] + :end-before: [END get_blob_properties] + :language: python + :dedent: 12 + :caption: Getting the properties for a blob. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + cpk = kwargs.pop('cpk', None) + cpk_info = None + if cpk: + if self.scheme.lower() != 'https': + raise ValueError("Customer provided encryption key must be used over HTTPS.") + cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash, + encryption_algorithm=cpk.algorithm) + try: + cls_method = kwargs.pop('cls', None) + if cls_method: + kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method) + blob_props = await self._client.blob.get_properties( + timeout=kwargs.pop('timeout', None), + version_id=kwargs.pop('version_id', None), + snapshot=self.snapshot, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, + cpk_info=cpk_info, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + blob_props.name = self.blob_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name + blob_props.snapshot = self.snapshot + return blob_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings=None, **kwargs): + # type: (Optional[ContentSettings], Any) -> None + """Sets system properties on the blob. + + If one property is set for the content_settings, all properties will be overridden. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. 
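A sketch of reading a few common fields from the BlobProperties returned by get_blob_properties above:

async def describe_blob(client):
    props = await client.get_blob_properties()
    # BlobProperties also exposes lease, copy and encryption state.
    return props.size, props.content_settings.content_type, props.last_modified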
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_http_headers_options(content_settings=content_settings, **kwargs) + try: + return await self._client.blob.set_http_headers(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_blob_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Sets user-defined metadata for the blob as one or more name-value pairs. + + :param metadata: + Dict containing name and value pairs. Each call to this operation + replaces all existing metadata attached to the blob. To remove all + metadata from the blob, call this operation with no metadata headers. + :type metadata: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. 
versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified) + """ + options = self._set_blob_metadata_options(metadata=metadata, **kwargs) + try: + return await self._client.blob.set_metadata(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_immutability_policy(self, immutability_policy, **kwargs): + # type: (ImmutabilityPolicy, **Any) -> Dict[str, str] + """The Set Immutability Policy operation sets the immutability policy on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'. + + :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Key value pairs of blob tags. + :rtype: Dict[str, str] + """ + + kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time + kwargs['immutability_policy_mode'] = immutability_policy.policy_mode + return await self._client.blob.set_immutability_policy(cls=return_response_headers, **kwargs) + + @distributed_trace_async() + async def delete_immutability_policy(self, **kwargs): + # type: (**Any) -> None + """The Delete Immutability Policy operation deletes the immutability policy on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Key value pairs of blob tags. + :rtype: Dict[str, str] + """ + + await self._client.blob.delete_immutability_policy(**kwargs) + + @distributed_trace_async + async def set_legal_hold(self, legal_hold, **kwargs): + # type: (bool, **Any) -> Dict[str, Union[str, datetime, bool]] + """The Set Legal Hold operation sets a legal hold on the blob. + + .. versionadded:: 12.10.0 + This operation was introduced in API version '2020-10-02'. + + :param bool legal_hold: + Specified if a legal hold should be set on the blob. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Key value pairs of blob tags. 
+ :rtype: Dict[str, Union[str, datetime, bool]] + """ + + return await self._client.blob.set_legal_hold(legal_hold, cls=return_response_headers, **kwargs) + + @distributed_trace_async + async def create_page_blob( # type: ignore + self, size, # type: int + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + premium_page_blob_tier=None, # type: Optional[Union[str, PremiumPageBlobTier]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Creates a new Page Blob of the specified size. + + :param int size: + This specifies the maximum size for the page blob, up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword int sequence_number: + Only for Page blobs. The sequence number is a user-controlled value that you can use to + track requests. The value of the sequence number must be between 0 + and 2^63 - 1.The default value is 0. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). 
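A sketch combining the metadata and legal-hold setters shown above; note that set_legal_hold assumes a storage account provisioned with version-level immutability support:

async def label_and_hold(client):
    # Each set_blob_metadata call replaces all existing metadata on the blob.
    await client.set_blob_metadata({"origin": "aosm", "reviewed": "true"})
    await client.set_legal_hold(True)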
Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_page_blob_options( + size, + content_settings=content_settings, + metadata=metadata, + premium_page_blob_tier=premium_page_blob_tier, + **kwargs) + try: + return await self._client.page_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_append_blob(self, content_settings=None, metadata=None, **kwargs): + # type: (Optional[ContentSettings], Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a new Append Blob. + + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
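Since create_page_blob above requires a 512-byte-aligned size (the same constraint enforced by upload_page_blob earlier in this diff), a sketch that rounds the requested size up before creating:

async def create_page_blob_aligned(client, size):
    aligned = (size + 511) // 512 * 512  # round up to the next 512-byte boundary
    return await client.create_page_blob(aligned, sequence_number=0)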
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict[str, Any] + """ + options = self._create_append_blob_options( + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.append_blob.create(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, str]], Any) -> Dict[str, Union[str, datetime]] + """Creates a snapshot of the blob. + + A snapshot is a read-only version of a blob that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a blob as it appears at a moment in time. + + A snapshot of a blob has the same name as the base blob from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict(str, str) + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
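Illustration (editor's sketch, not part of the vendored file): create_append_blob only creates an empty blob; content is then added with append_block, which belongs to the same upstream client but is not shown in this hunk. Names are placeholders.

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def main() -> None:
        blob = BlobClient.from_connection_string(
            "<connection-string>", container_name="logs", blob_name="app.log")
        async with blob:
            await blob.create_append_blob()
            await blob.append_block(b"first line\n")

    asyncio.run(main())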
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START create_blob_snapshot] + :end-before: [END create_blob_snapshot] + :language: python + :dedent: 12 + :caption: Create a snapshot of the blob. + """ + options = self._create_snapshot_options(metadata=metadata, **kwargs) + try: + return await self._client.blob.create_snapshot(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, metadata=None, incremental_copy=False, **kwargs): + # type: (str, Optional[Dict[str, str]], bool, Any) -> Dict[str, Union[str, datetime]] + """Copies a blob from the given URL. + + This operation returns a dictionary containing `copy_status` and `copy_id`, + which can be used to check the status of or abort the copy operation. + `copy_status` will be 'success' if the copy completed synchronously or + 'pending' if the copy has been started asynchronously. For asynchronous copies, + the status can be checked by polling the :func:`get_blob_properties` method and + checking the copy status. Set `requires_sync` to True to force the copy to be synchronous. + The Blob service copies blobs on a best-effort basis. + + The source blob for a copy operation may be a block blob, an append blob, + or a page blob. 
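Illustration (editor's sketch, not part of the vendored file): the dict returned by create_snapshot carries the opaque snapshot ID alongside the etag and last-modified values; `blob` is assumed to be an already-constructed aio BlobClient.

    from azure.storage.blob.aio import BlobClient

    async def take_snapshot(blob: BlobClient) -> str:
        # Returns the opaque snapshot ID (a DateTime string) for later reads.
        props = await blob.create_snapshot(metadata={"purpose": "backup"})
        return props["snapshot"]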
If the destination blob already exists, it must be of the + same blob type as the source blob. Any existing destination blob will be + overwritten. The destination blob cannot be modified while a copy operation + is in progress. + + When copying from a page blob, the Blob service creates a destination page + blob of the source blob's length, initially containing all zeroes. Then + the source page ranges are enumerated, and non-empty ranges are copied. + + For a block blob or an append blob, the Blob service creates a committed + blob of zero length before returning from this operation. When copying + from a block blob, all committed blocks and their block IDs are copied. + Uncommitted blocks are not copied. At the end of the copy operation, the + destination blob will have the same committed block count as the source. + + When copying from an append blob, all committed blocks are copied. At the + end of the copy operation, the destination blob will have the same committed + block count as the source. + + :param str source_url: + A URL of up to 2 KB in length that specifies a file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.blob.core.windows.net/mycontainer/myblob + + https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot= + + https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken + :param metadata: + Name-value pairs associated with the blob as metadata. If no name-value + pairs are specified, the operation will copy the metadata from the + source blob or file to the destination blob. If one or more name-value + pairs are specified, the destination blob is created with the specified + metadata, and metadata is not copied from the source blob or file. + :type metadata: dict(str, str) + :param bool incremental_copy: + Copies the snapshot of the source page blob to a destination page blob. + The snapshot is copied such that only the differential changes between + the previously copied snapshot are transferred to the destination. + The copied snapshots are complete copies of the original snapshot and + can be read or copied from as usual. Defaults to False. + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_). + + The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob. + This option is only available when `incremental_copy=False` and `requires_sync=True`. + + .. versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) or Literal["COPY"] + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. 
Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has been modified since the specified date/time. + If the destination blob has not been modified, the Blob service returns + status code 412 (Precondition Failed). + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only + if the destination blob has not been modified since the specified + date/time. If the destination blob has been modified, the Blob service + returns status code 412 (Precondition Failed). + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword destination_lease: + The lease ID specified for this header must match the lease ID of the + destination blob. If the request does not include the lease ID or it is not + valid, the operation fails with status code 412 (Precondition Failed). + :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword source_lease: + Specify this to perform the Copy Blob operation only if + the lease ID given matches the active lease ID of the source blob. + :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. 
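Illustration (editor's sketch, not part of the vendored file): polling an asynchronous copy via get_blob_properties, as the docstring above describes; `dest` is an assumed destination aio BlobClient and the 5-second interval is arbitrary.

    import asyncio
    from azure.storage.blob.aio import BlobClient

    async def copy_and_wait(dest: BlobClient, source_url: str) -> None:
        copy = await dest.start_copy_from_url(source_url)
        status = copy["copy_status"]
        while status == "pending":
            await asyncio.sleep(5)
            status = (await dest.get_blob_properties()).copy.status
        print("final copy status:", status)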
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword bool seal_destination_blob: + Seal the destination append blob. This operation is only for append blob. + + .. versionadded:: 12.4.0 + + :keyword bool requires_sync: + Enforces that the service will not return a response until the copy is complete. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. This option is only available when `incremental_copy` is + set to False and `requires_sync` is set to True. + + .. versionadded:: 12.9.0 + + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.10.0 + + :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status). + :rtype: dict[str, Union[str, ~datetime.datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START copy_blob_from_url] + :end-before: [END copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Copy a blob from a URL. + """ + options = self._start_copy_from_url_options( + source_url=self._encode_source_url(source_url), + metadata=metadata, + incremental_copy=incremental_copy, + **kwargs) + try: + if incremental_copy: + return await self._client.page_blob.copy_incremental(**options) + return await self._client.blob.start_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, Dict[str, Any], BlobProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination blob with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of BlobProperties. + :type copy_id: str or ~azure.storage.blob.BlobProperties + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START abort_copy_blob_from_url] + :end-before: [END abort_copy_blob_from_url] + :language: python + :dedent: 16 + :caption: Abort copying a blob from URL. 
+ """ + options = self._abort_copy_options(copy_id, **kwargs) + try: + await self._client.blob.abort_copy_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease(self, lease_duration=-1, lease_id=None, **kwargs): + # type: (int, Optional[str], Any) -> BlobLeaseClient + """Requests a new lease. + + If the blob does not have an active lease, the Blob + Service creates a lease on the blob and returns a new lease. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A BlobLeaseClient object. + :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a blob. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(lease_duration=lease_duration, **kwargs) + return lease + + @distributed_trace_async + async def set_standard_blob_tier(self, standard_blob_tier, **kwargs): + # type: (Union[str, StandardBlobTier], Any) -> None + """This operation sets the tier on a block blob. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + :param standard_blob_tier: + Indicates the tier to be set on the blob. Options include 'Hot', 'Cool', + 'Archive'. 
The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if standard_blob_tier is None: + raise ValueError("A StandardBlobTier must be specified") + try: + await self._client.blob.set_tier( + tier=standard_blob_tier, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block( + self, block_id, # type: str + data, # type: Union[Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param data: The blob data. + :param int length: Size of the block. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https, as https (the default), will + already validate. Note that this MD5 hash is not stored with the + blob. Also note that if enabled, the memory-efficient upload algorithm + will not be used because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. 
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + """ + options = self._stage_block_options( + block_id, + data, + length=length, + **kwargs) + try: + return await self._client.block_blob.stage_block(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def stage_block_from_url( + self, block_id, # type: Union[str, int] + source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + source_content_md5=None, # type: Optional[Union[bytes, bytearray]] + **kwargs + ): + # type: (...) -> None + """Creates a new block to be committed as part of a blob where + the contents are read from a URL. + + :param str block_id: A string value that identifies the block. + The string should be less than or equal to 64 bytes in size. + For a given blob, the block_id must be the same size for each block. + :param str source_url: The URL. + :param int source_offset: + Start of byte range to use for the block. + Must be set if source length is provided. + :param int source_length: The size of the block in bytes. + :param bytearray source_content_md5: + Specify the md5 calculated for the range of + bytes that must be read from the copy source. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. 
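Illustration (editor's sketch, not part of the vendored file): block IDs must all be the same length for a given blob, so a fixed-width scheme such as uuid4().hex is typical; a staged block stays uncommitted until commit_block_list runs (see the flow sketched further below).

    import uuid
    from azure.storage.blob.aio import BlobClient

    async def stage_one_chunk(blob: BlobClient, chunk: bytes) -> str:
        block_id = uuid.uuid4().hex  # fixed width keeps every ID the same size
        await blob.stage_block(block_id, chunk)
        return block_id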
+ :rtype: None + """ + options = self._stage_block_from_url_options( + block_id, + source_url=self._encode_source_url(source_url), + source_offset=source_offset, + source_length=source_length, + source_content_md5=source_content_md5, + **kwargs) + try: + return await self._client.block_blob.stage_block_from_url(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_block_list(self, block_list_type="committed", **kwargs): + # type: (Optional[str], Any) -> Tuple[List[BlobBlock], List[BlobBlock]] + """The Get Block List operation retrieves the list of blocks that have + been uploaded as part of a block blob. + + :param str block_list_type: + Specifies whether to return the list of committed + blocks, the list of uncommitted blocks, or both lists together. + Possible values include: 'committed', 'uncommitted', 'all' + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A tuple of two lists - committed and uncommitted blocks + :rtype: tuple(list(~azure.storage.blob.BlobBlock), list(~azure.storage.blob.BlobBlock)) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + try: + blocks = await self._client.block_blob.get_block_list( + list_type=block_list_type, + snapshot=self.snapshot, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return self._get_block_list_result(blocks) + + @distributed_trace_async + async def commit_block_list( # type: ignore + self, block_list, # type: List[BlobBlock] + content_settings=None, # type: Optional[ContentSettings] + metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Commit Block List operation writes a blob by specifying the list of + block IDs that make up the blob. + + :param list block_list: + List of Blockblobs. + :param ~azure.storage.blob.ContentSettings content_settings: + ContentSettings object used to set blob properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param metadata: + Name-value pairs associated with the blob as metadata. + :type metadata: dict[str, str] + :keyword tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + + .. 
versionadded:: 12.4.0 + + :paramtype tags: dict(str, str) + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy: + Specifies the immutability policy of a blob, blob snapshot or blob version. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool legal_hold: + Specified if a legal hold should be set on the blob. + + .. versionadded:: 12.10.0 + This was introduced in API version '2020-10-02'. + + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier: + A standard blob tier value to set the blob to. For this version of the library, + this is only applicable to block blobs on standard storage accounts. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. 
To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._commit_block_list_options( + block_list, + content_settings=content_settings, + metadata=metadata, + **kwargs) + try: + return await self._client.block_blob.commit_block_list(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_premium_page_blob_tier(self, premium_page_blob_tier, **kwargs): + # type: (Union[str, PremiumPageBlobTier], **Any) -> None + """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts. + + :param premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + mod_conditions = get_modify_conditions(kwargs) + if premium_page_blob_tier is None: + raise ValueError("A PremiumPageBlobTier must be specified") + try: + await self._client.blob.set_tier( + tier=premium_page_blob_tier, + timeout=kwargs.pop('timeout', None), + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_blob_tags(self, tags=None, **kwargs): + # type: (Optional[Dict[str, str]], **Any) -> Dict[str, Any] + """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot. + Each call to this operation replaces all existing tags attached to the blob. To remove all + tags from the blob, call this operation with no tags set. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :param tags: + Name-value pairs associated with the blob as tag. Tags are case-sensitive. + The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters, + and tag values must be between 0 and 256 characters. + Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9), + space (` `), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_) + :type tags: dict(str, str) + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to set tags on. + :keyword bool validate_content: + If true, calculates an MD5 hash of the tags content. The storage + service checks the hash of the content that has arrived + with the hash that was sent.
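Illustration (editor's sketch, not part of the vendored file): the full stage/commit flow documented above, using BlobBlock from the same upstream package; `blob` and `chunks` are placeholders.

    import uuid
    from typing import List
    from azure.storage.blob import BlobBlock
    from azure.storage.blob.aio import BlobClient

    async def upload_in_blocks(blob: BlobClient, chunks: List[bytes]) -> None:
        ids = []
        for chunk in chunks:
            block_id = uuid.uuid4().hex
            await blob.stage_block(block_id, chunk)
            ids.append(block_id)
        await blob.commit_block_list([BlobBlock(block_id=i) for i in ids])
        committed, uncommitted = await blob.get_block_list("all")
        assert not uncommitted  # everything staged above is now committed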
This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified) + :rtype: Dict[str, Any] + """ + options = self._set_blob_tags_options(tags=tags, **kwargs) + try: + return await self._client.blob.set_tags(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_blob_tags(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. + + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to add tags to. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Key value pairs of blob tags. + :rtype: Dict[str, str] + """ + options = self._get_blob_tags_options(**kwargs) + try: + _, tags = await self._client.blob.get_tags(**options) + return parse_tags(tags) # pylint: disable=protected-access + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_page_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_snapshot_diff=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. + + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. 
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param str previous_snapshot_diff: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous blob snapshot to be compared + against a more recent snapshot or the current blob. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: + A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. + The first element are filled page ranges, the 2nd element is cleared page ranges. + :rtype: tuple(list(dict(str, str), list(dict(str, str)) + """ + warnings.warn( + "get_page_ranges is deprecated, use list_page_ranges instead", + DeprecationWarning + ) + + options = self._get_page_ranges_options( + offset=offset, + length=length, + previous_snapshot_diff=previous_snapshot_diff, + **kwargs) + try: + if previous_snapshot_diff: + ranges = await self._client.page_blob.get_page_ranges_diff(**options) + else: + ranges = await self._client.page_blob.get_page_ranges(**options) + except HttpResponseError as error: + process_storage_error(error) + return get_page_ranges_result(ranges) + + @distributed_trace + def list_page_ranges( + self, + *, + offset: Optional[int] = None, + length: Optional[int] = None, + previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None, + **kwargs: Any + ) -> AsyncItemPaged[PageRange]: + """Returns the list of valid page ranges for a Page Blob or snapshot + of a page blob. If `previous_snapshot` is specified, the result will be + a diff of changes between the target blob and the previous snapshot. + + :keyword int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. 
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword previous_snapshot: + A snapshot value that specifies that the response will contain only pages that were changed + between target blob and previous snapshot. Changed pages include both updated and cleared + pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot` + is the older of the two. + :paramtype previous_snapshot: str or Dict[str, Any] + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int results_per_page: + The maximum number of page ranges to retrieve per API call. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) of PageRange. 
+ :rtype: ~azure.core.paging.ItemPaged[~azure.storage.blob.PageRange] + """ + results_per_page = kwargs.pop('results_per_page', None) + options = self._get_page_ranges_options( + offset=offset, + length=length, + previous_snapshot_diff=previous_snapshot, + **kwargs) + + if previous_snapshot: + command = partial( + self._client.page_blob.get_page_ranges_diff, + **options) + else: + command = partial( + self._client.page_blob.get_page_ranges, + **options) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=PageRangePaged) + + @distributed_trace_async + async def get_page_range_diff_for_managed_disk( + self, previous_snapshot_url, # type: str + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid page ranges for a managed disk or snapshot. + + .. note:: + This operation is only available for managed disk accounts. + + .. versionadded:: 12.2.0 + This operation was introduced in API version '2019-07-07'. + + :param previous_snapshot_url: + Specifies the URL of a previous snapshot of the managed disk. + The response will only contain pages that were changed between the target blob and + its previous snapshot. + :param int offset: + Start of byte range to use for getting valid page ranges. + If no length is given, all bytes after the offset will be searched. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for getting valid page ranges. + If length is given, offset must be provided. + This range will return valid page ranges from the offset start up to + the specified length. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. 
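Illustration (editor's sketch, not part of the vendored file): list_page_ranges returns an async paged iterable of PageRange, so it is consumed with async for; the start/end/cleared attribute names follow the upstream PageRange model.

    from azure.storage.blob.aio import BlobClient

    async def print_page_ranges(blob: BlobClient) -> None:
        async for page_range in blob.list_page_ranges():
            # cleared is True for ranges reported as cleared in a diff listing
            print(page_range.start, page_range.end, page_range.cleared)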
+ :returns: + A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys. + The first element is filled page ranges, the 2nd element is cleared page ranges. + :rtype: tuple(list(dict(str, str)), list(dict(str, str))) + """ + options = self._get_page_ranges_options( + offset=offset, + length=length, + prev_snapshot_url=previous_snapshot_url, + **kwargs) + try: + ranges = await self._client.page_blob.get_page_ranges_diff(**options) + except HttpResponseError as error: + process_storage_error(error) + return get_page_ranges_result(ranges) + + @distributed_trace_async + async def set_sequence_number( # type: ignore + self, sequence_number_action, # type: Union[str, SequenceNumberAction] + sequence_number=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the blob sequence number. + + :param str sequence_number_action: + This property indicates how the service should modify the blob's sequence + number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information. + :param str sequence_number: + This property sets the blob's sequence number. The sequence number is a + user-controlled property that you can use to track requests and manage + concurrency issues. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any) + """ + options = self._set_sequence_number_options( + sequence_number_action, sequence_number=sequence_number, **kwargs) + try: + return await self._client.page_blob.update_sequence_number(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def resize_blob(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Union[str, datetime]] + """Resizes a page blob to the specified size. + + If the specified value is less than the current size of the blob, + then all pages above the specified value are cleared. + + :param int size: + Size used to resize blob. Maximum size for a page blob is up to 1 TB. + The page blob size must be aligned to a 512-byte boundary. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier: + A page blob tier value to set the blob to. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._resize_blob_options(size, **kwargs) + try: + return await self._client.page_blob.resize(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_page( # type: ignore + self, page, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """The Upload Pages operation writes a range of pages to a page blob. + + :param bytes page: + Content of the page. + :param int offset: + Start of byte range to use for writing to a section of the blob. 
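Illustration (editor's sketch, not part of the vendored file): resize_blob and set_sequence_number as documented above; sizes stay 512-byte aligned, and the 'update' action needs an explicit sequence number (typed as str in the docstring, though an int is commonly passed).

    from azure.storage.blob.aio import BlobClient

    async def shrink_and_renumber(blob: BlobClient) -> None:
        await blob.resize_blob(4 * 512)  # pages beyond 2 KiB are cleared
        await blob.set_sequence_number("update", 7)  # actions: 'increment', 'max', 'update'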
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._upload_page_options( + page=page, + offset=offset, + length=length, + **kwargs) + try: + return await self._client.page_blob.upload_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_pages_from_url(self, source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """ + The Upload Pages operation writes a range of pages to a page blob where + the contents are read from a URL. + + :param str source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword bytes source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. 
+ :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + + options = self._upload_pages_from_url_options( + source_url=self._encode_source_url(source_url), + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.page_blob.upload_pages_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def clear_page(self, offset, length, **kwargs): + # type: (int, int, Any) -> Dict[str, Union[str, datetime]] + """Clears a range of pages. + + :param int offset: + Start of byte range to use for writing to a section of the blob. + Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :param int length: + Number of bytes to use for writing to a section of the blob. 
+ Pages must be aligned with 512-byte boundaries, the start offset + must be a modulus of 512 and the length must be a modulus of + 512. + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int if_sequence_number_lte: + If the blob's sequence number is less than or equal to + the specified value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_lt: + If the blob's sequence number is less than the specified + value, the request proceeds; otherwise it fails. + :keyword int if_sequence_number_eq: + If the blob's sequence number is equal to the specified + value, the request proceeds; otherwise it fails. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + options = self._clear_page_options(offset, length, **kwargs) + try: + return await self._client.page_blob.clear_pages(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def append_block( # type: ignore + self, data, # type: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]] + length=None, # type: Optional[int] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """Commits a new block of data to the end of the existing append blob. + + :param data: + Content of the block. + :param int length: + Size of the block in bytes. + :keyword bool validate_content: + If true, calculates an MD5 hash of the block content. 
The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https, as https (the default), + will already validate. Note that this MD5 hash is not stored with the + blob. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. 
+ This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._append_block_options( + data, + length=length, + **kwargs + ) + try: + return await self._client.append_blob.append_block(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async() + async def append_block_from_url(self, copy_source_url, # type: str + source_offset=None, # type: Optional[int] + source_length=None, # type: Optional[int] + **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """ + Creates a new block to be committed as part of a blob, where the contents are read from a source url. + + :param str copy_source_url: + The URL of the source data. It can point to any Azure Blob or File, that is either public or has a + shared access signature attached. + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + :param int source_length: + This indicates the end of the range of bytes that has to be taken from the copy source. + :keyword bytearray source_content_md5: + If given, the service will calculate the MD5 hash of the block content and compare against this value. + :keyword int maxsize_condition: + Optional conditional header. The max length in bytes permitted for + the append blob. If the Append Block operation would cause the blob + to exceed that limit or if the blob size is already greater than the + value specified in this header, the request will fail with + MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed). + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the + AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). + :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + The destination ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The destination match condition to use upon the etag. 
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the source resource has been modified since the specified time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the source resource has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk: + Encrypts the data on the service-side with the given key. + Use of customer-provided keys must be done over HTTPS. + As the encryption key itself is provided in the request, + a secure connection must be established to transfer the key. + :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._append_block_from_url_options( + copy_source_url=self._encode_source_url(copy_source_url), + source_offset=source_offset, + source_length=source_length, + **kwargs + ) + try: + return await self._client.append_blob.append_block_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async() + async def seal_append_blob(self, **kwargs): + # type: (...) -> Dict[str, Union[str, datetime, int]] + """The Seal operation seals the Append Blob to make it read-only. + + .. versionadded:: 12.4.0 + + :keyword int appendpos_condition: + Optional conditional header, used only for the Append Block operation. + A number indicating the byte offset to compare. Append Block will + succeed only if the append position is equal to this number. If it + is not, the request will fail with the AppendPositionConditionNotMet error + (HTTP status code 412 - Precondition Failed). 
+ :keyword lease: + Required if the blob has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count). + :rtype: dict(str, Any) + """ + options = self._seal_append_blob_options(**kwargs) + try: + return await self._client.append_blob.seal(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _get_container_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> ContainerClient + """Get a client to interact with the blob's parent container. + + The container need not already exist. Defaults to current blob's credentials. + + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_client_from_blob_client] + :end-before: [END get_container_client_from_blob_client] + :language: python + :dedent: 12 + :caption: Get container client from blob object. 
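+
+ A minimal usage sketch is shown below; the account URL, container and blob
+ names are hypothetical, and ``from_blob_url`` is assumed to behave as in the
+ upstream azure-storage-blob client from which this module is vendored::
+
+ blob_client = BlobClient.from_blob_url(
+ "https://myaccount.blob.core.windows.net/mycontainer/myblob")
+ container_client = blob_client._get_container_client()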
+ """ + from ._container_client_async import ContainerClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=self.container_name, + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_service_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_service_client_async.py new file mode 100644 index 00000000000..ca006c30b20 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_blob_service_client_async.py @@ -0,0 +1,731 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import functools +import warnings +from typing import ( + Any, Dict, List, Optional, Union, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async + + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.response_handlers import ( + parse_to_internal_user_delegation_key, + process_storage_error, + return_response_headers, +) +from .._shared.models import LocationMode +from .._shared.parser import _to_utc_datetime +from .._shared.policies_async import ExponentialRetry +from .._generated.aio import AzureBlobStorage +from .._generated.models import StorageServiceProperties, KeyInfo +from .._blob_service_client import BlobServiceClient as BlobServiceClientBase +from .._deserialize import service_stats_deserialize, service_properties_deserialize +from .._encryption import StorageEncryptionMixin +from .._models import ContainerProperties +from .._serialize import get_api_version +from ._blob_client_async import BlobClient +from ._container_client_async import ContainerClient +from ._models import ContainerPropertiesPaged, FilteredBlobPaged + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from .._shared.models import UserDelegationKey + from ._lease_async import BlobLeaseClient + from .._models import ( + BlobProperties, + PublicAccess, + BlobAnalyticsLogging, + Metrics, + CorsRule, + RetentionPolicy, + StaticWebsite, + ) + + +class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase, 
StorageEncryptionMixin):
+ """A client to interact with the Blob Service at the account level.
+
+ This client provides operations to retrieve and configure the account properties
+ as well as list, create and delete containers within the account.
+ For operations relating to a specific container or blob, clients for those entities
+ can also be retrieved using the `get_client` functions.
+
+ :param str account_url:
+ The URL to the blob storage account. Any other entities included
+ in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+ authenticated with a SAS token.
+ :param credential:
+ The credentials with which to authenticate. This is optional if the
+ account URL already has a SAS token. The value can be a SAS token string,
+ an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+ an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+ If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+ - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+ If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+ should be the storage account key.
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is the most recent service version that is
+ compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+ uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+ the exceeded part will be downloaded in chunks (could be parallel). Defaults to 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_service_client]
+ :end-before: [END create_blob_service_client]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with account url and credential.
+
+ .. literalinclude:: ../samples/blob_samples_authentication_async.py
+ :start-after: [START create_blob_service_client_oauth]
+ :end-before: [END create_blob_service_client_oauth]
+ :language: python
+ :dedent: 8
+ :caption: Creating the BlobServiceClient with Azure Identity credentials.
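+
+ A minimal construction sketch; the account URL is hypothetical and the
+ azure-identity package is assumed to be installed to supply the credential::
+
+ from azure.identity.aio import DefaultAzureCredential
+
+ service = BlobServiceClient(
+ "https://myaccount.blob.core.windows.net",
+ credential=DefaultAzureCredential())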
+ """ + + def __init__( + self, account_url: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> None: + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + super(BlobServiceClient, self).__init__( + account_url, + credential=credential, + **kwargs) + self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + self._configure_encryption(kwargs) + + @distributed_trace_async + async def get_user_delegation_key(self, key_start_time, # type: datetime + key_expiry_time, # type: datetime + **kwargs # type: Any + ): + # type: (...) -> UserDelegationKey + """ + Obtain a user delegation key for the purpose of signing SAS tokens. + A token credential must be present on the service object for this request to succeed. + + :param ~datetime.datetime key_start_time: + A DateTime value. Indicates when the key becomes valid. + :param ~datetime.datetime key_expiry_time: + A DateTime value. Indicates when the key stops being valid. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :return: The user delegation key. + :rtype: ~azure.storage.blob.UserDelegationKey + """ + key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time)) + timeout = kwargs.pop('timeout', None) + try: + user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info, + timeout=timeout, + **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + return parse_to_internal_user_delegation_key(user_delegation_key) # type: ignore + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_account_info] + :end-before: [END get_blob_service_account_info] + :language: python + :dedent: 12 + :caption: Getting account information for the blob service. + """ + try: + return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_service_stats(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Retrieves statistics related to replication for the Blob service. + + It is only available when read-access geo-redundant replication is enabled for + the storage account. + + With geo-redundant replication, Azure Storage maintains your data durable + in two locations. In both locations, Azure Storage constantly maintains + multiple healthy replicas of your data. 
The location where you read, + create, update, or delete data is the primary storage account location. + The primary location exists in the region you choose at the time you + create an account via the Azure Management Azure classic portal, for + example, North Central US. The location to which your data is replicated + is the secondary location. The secondary location is automatically + determined based on the location of the primary; it is in a second data + center that resides in the same region as the primary location. Read-only + access is available from the secondary location, if read-access geo-redundant + replication is enabled for your storage account. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :return: The blob service stats. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_stats] + :end-before: [END get_blob_service_stats] + :language: python + :dedent: 12 + :caption: Getting service stats for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + stats = await self._client.service.get_statistics( # type: ignore + timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs) + return service_stats_deserialize(stats) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An object containing blob service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_properties] + :end-before: [END get_blob_service_properties] + :language: python + :dedent: 12 + :caption: Getting service properties for the blob service. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, analytics_logging=None, # type: Optional[BlobAnalyticsLogging] + hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + target_version=None, # type: Optional[str] + delete_retention_policy=None, # type: Optional[RetentionPolicy] + static_website=None, # type: Optional[StaticWebsite] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's Blob service, including + Azure Storage Analytics. + + If an element (e.g. 
analytics_logging) is left as None, the + existing settings on the service for that functionality are preserved. + + :param analytics_logging: + Groups the Azure Analytics Logging settings. + :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for blobs. + :type hour_metrics: ~azure.storage.blob.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for blobs. + :type minute_metrics: ~azure.storage.blob.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list[~azure.storage.blob.CorsRule] + :param str target_version: + Indicates the default version to use for requests if an incoming + request's version is not specified. + :param delete_retention_policy: + The delete retention policy specifies whether to retain deleted blobs. + It also specifies the number of days and versions of blob to keep. + :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy + :param static_website: + Specifies whether the static website feature is enabled, + and if yes, indicates the index document and 404 error document to use. + :type static_website: ~azure.storage.blob.StaticWebsite + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START set_blob_service_properties] + :end-before: [END set_blob_service_properties] + :language: python + :dedent: 12 + :caption: Setting service properties for the blob service. + """ + if all(parameter is None for parameter in [ + analytics_logging, hour_metrics, minute_metrics, cors, + target_version, delete_retention_policy, static_website]): + raise ValueError("set_service_properties should be called with at least one parameter") + + props = StorageServiceProperties( + logging=analytics_logging, + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + default_service_version=target_version, + delete_retention_policy=delete_retention_policy, + static_website=static_website + ) + timeout = kwargs.pop('timeout', None) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_containers( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> AsyncItemPaged[ContainerProperties] + """Returns a generator to list the containers under the specified account. + + The generator will lazily follow the continuation tokens returned by + the service and stop when all containers have been returned. + + :param str name_starts_with: + Filters the results to return only containers whose names + begin with the specified prefix. + :param bool include_metadata: + Specifies that container metadata to be returned in the response. + The default value is `False`. 
+ :keyword bool include_deleted: + Specifies that deleted containers to be returned in the response. This is for container restore enabled + account. The default value is `False`. + .. versionadded:: 12.4.0 + :keyword bool include_system: + Flag specifying that system containers should be included. + .. versionadded:: 12.10.0 + :keyword int results_per_page: + The maximum number of container names to retrieve per API + call. If the request does not specify the server will return up to 5,000 items. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) of ContainerProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_list_containers] + :end-before: [END bsc_list_containers] + :language: python + :dedent: 16 + :caption: Listing the containers in the blob service. + """ + include = ['metadata'] if include_metadata else [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + include_system = kwargs.pop('include_system', None) + if include_system: + include.append("system") + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_containers_segment, + prefix=name_starts_with, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=ContainerPropertiesPaged + ) + + @distributed_trace + def find_blobs_by_tags(self, filter_expression, **kwargs): + # type: (str, **Any) -> AsyncItemPaged[FilteredBlob] + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param str filter_expression: + The expression to find blobs whose tags matches the specified condition. + eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'" + To specify a container, eg. "@container='containerName' and \"Name\"='C'" + :keyword int results_per_page: + The max result per page when paginating. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) response of BlobProperties. 
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob] + """ + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.service.filter_blobs, + where=filter_expression, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=FilteredBlobPaged) + + @distributed_trace_async + async def create_container( + self, name, # type: str + metadata=None, # type: Optional[Dict[str, str]] + public_access=None, # type: Optional[Union[PublicAccess, str]] + **kwargs + ): + # type: (...) -> ContainerClient + """Creates a new container under the specified account. + + If the container with the same name already exists, a ResourceExistsError will + be raised. This method returns a client with which to interact with the newly + created container. + + :param str name: The name of the container to create. + :param metadata: + A dict with name-value pairs to associate with the + container as metadata. Example: `{'Category':'test'}` + :type metadata: dict(str, str) + :param public_access: + Possible values include: 'container', 'blob'. + :type public_access: str or ~azure.storage.blob.PublicAccess + :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_create_container] + :end-before: [END bsc_create_container] + :language: python + :dedent: 16 + :caption: Creating a container in the blob service. + """ + container = self.get_container_client(name) + timeout = kwargs.pop('timeout', None) + kwargs.setdefault('merge_span', True) + await container.create_container( + metadata=metadata, public_access=public_access, timeout=timeout, **kwargs) + return container + + @distributed_trace_async + async def delete_container( + self, container, # type: Union[ContainerProperties, str] + lease=None, # type: Optional[Union[BlobLeaseClient, str]] + **kwargs + ): + # type: (...) -> None + """Marks the specified container for deletion. + + The container and any blobs contained within it are later deleted during garbage collection. + If the container is not found, a ResourceNotFoundError will be raised. + + :param container: + The container to delete. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. + Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_delete_container] + :end-before: [END bsc_delete_container] + :language: python + :dedent: 16 + :caption: Deleting a container in the blob service. + """ + container = self.get_container_client(container) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await container.delete_container( # type: ignore + lease=lease, + timeout=timeout, + **kwargs) + + @distributed_trace_async + async def _rename_container(self, name, new_name, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str name: + The name of the container to rename. + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: ~azure.storage.blob.ContainerClient + """ + renamed_container = self.get_container_client(new_name) + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id # type: str + except AttributeError: + kwargs['source_lease_id'] = lease + try: + await renamed_container._client.container.rename(name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def undelete_container(self, deleted_container_name, deleted_container_version, **kwargs): + # type: (str, str, **Any) -> ContainerClient + """Restores soft-deleted container. + + Operation will only be successful if used within the specified number of days + set in the delete retention policy. + + .. versionadded:: 12.4.0 + This operation was introduced in API version '2019-12-12'. 
+ + :param str deleted_container_name: + Specifies the name of the deleted container to restore. + :param str deleted_container_version: + Specifies the version of the deleted container to restore. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: ~azure.storage.blob.aio.ContainerClient + """ + new_name = kwargs.pop('new_name', None) + if new_name: + warnings.warn("`new_name` is no longer supported.", DeprecationWarning) + container = self.get_container_client(new_name or deleted_container_name) + try: + await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access + deleted_container_version=deleted_container_version, + timeout=kwargs.pop('timeout', None), **kwargs) + return container + except HttpResponseError as error: + process_storage_error(error) + + def get_container_client(self, container): + # type: (Union[ContainerProperties, str]) -> ContainerClient + """Get a client to interact with the specified container. + + The container need not already exist. + + :param container: + The container. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :returns: A ContainerClient. + :rtype: ~azure.storage.blob.aio.ContainerClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_container_client] + :end-before: [END bsc_get_container_client] + :language: python + :dedent: 12 + :caption: Getting the container client to interact with a specific container. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ContainerClient( + self.url, container_name=container_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) + + def get_blob_client( + self, container, # type: Union[ContainerProperties, str] + blob, # type: Union[BlobProperties, str] + snapshot=None # type: Optional[Union[Dict[str, Any], str]] + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param container: + The container that the blob is in. This can either be the name of the container, + or an instance of ContainerProperties. + :type container: str or ~azure.storage.blob.ContainerProperties + :param blob: + The blob with which to interact. This can either be the name of the blob, + or an instance of BlobProperties. + :type blob: str or ~azure.storage.blob.BlobProperties + :param snapshot: + The optional blob snapshot on which to operate. This can either be the ID of the snapshot, + or a dictionary output returned by + :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`. 
+ :type snapshot: str or dict(str, Any) + :returns: A BlobClient. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START bsc_get_blob_client] + :end-before: [END bsc_get_blob_client] + :language: python + :dedent: 16 + :caption: Getting the blob client to interact with a specific blob. + """ + try: + container_name = container.name + except AttributeError: + container_name = container + + try: + blob_name = blob.name + except AttributeError: + blob_name = blob + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( # type: ignore + self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_container_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_container_client_async.py new file mode 100644 index 00000000000..70362656d34 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_container_client_async.py @@ -0,0 +1,1416 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method + +import functools +from typing import ( # pylint: disable=unused-import + Any, AnyStr, AsyncIterable, AsyncIterator, Dict, List, IO, Iterable, Optional, overload, Union, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.pipeline import AsyncPipeline +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + process_storage_error, + return_response_headers, + return_headers_and_deserialized +) +from .._generated.aio import AzureBlobStorage +from .._generated.models import SignedIdentifier +from .._container_client import ContainerClient as ContainerClientBase, _get_blob_name +from .._deserialize import deserialize_container_properties +from ._download_async import StorageStreamDownloader +from .._encryption import StorageEncryptionMixin +from .._models import ContainerProperties, BlobType, BlobProperties, FilteredBlob +from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions +from ._blob_client_async import BlobClient +from ._lease_async import BlobLeaseClient +from ._list_blobs_helper import BlobNamesPaged, BlobPropertiesPaged, BlobPrefix +from .._list_blobs_helper import IgnoreListBlobsDeserializer +from ._models import FilteredBlobPaged + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from datetime import datetime + from .._models import ( # pylint: disable=unused-import + AccessPolicy, + StandardBlobTier, + PremiumPageBlobTier, + PublicAccess) + + +class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase, StorageEncryptionMixin): + """A client to interact with a specific container, although that container + may not yet exist. + + For operations relating to a specific blob within this container, a blob client can be + retrieved using the :func:`~get_blob_client` function. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the container, + use the :func:`from_container_url` classmethod. + :param container_name: + The name of the container for the blob. + :type container_name: str + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. 
+ :keyword str api_version:
+ The Storage API version to use for requests. Default value is the most recent service version that is
+ compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+ .. versionadded:: 12.2.0
+
+ :keyword str secondary_hostname:
+ The hostname of the secondary endpoint.
+ :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+ Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob
+ will be uploaded with a single HTTP PUT request. If the blob size is larger than max_single_put_size,
+ the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+ :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory-efficient
+ algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+ :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+ :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+ :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+ anything beyond this size will be downloaded in chunks (which may run in parallel). Defaults to
+ 32*1024*1024, or 32MB.
+ :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+ or 4MB.
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START create_container_client_from_service]
+ :end-before: [END create_container_client_from_service]
+ :language: python
+ :dedent: 8
+ :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+ .. literalinclude:: ../samples/blob_samples_containers_async.py
+ :start-after: [START create_container_client_sasurl]
+ :end-before: [END create_container_client_sasurl]
+ :language: python
+ :dedent: 12
+ :caption: Creating the container client directly.
+ """
+ def __init__(
+ self, account_url: str,
+ container_name: str,
+ credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long
+ **kwargs: Any
+ ) -> None:
+ kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+ super(ContainerClient, self).__init__(
+ account_url,
+ container_name=container_name,
+ credential=credential,
+ **kwargs)
+ self._api_version = get_api_version(kwargs)
+ self._client = self._build_generated_client()
+ self._configure_encryption(kwargs)
+
+ def _build_generated_client(self):
+ # Build the autogenerated client against this client's URL, reusing the shared pipeline.
+ client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+ client._config.version = self._api_version # pylint: disable=protected-access
+ return client
+
+ @distributed_trace_async
+ async def create_container(self, metadata=None, public_access=None, **kwargs):
+ # type: (Optional[Dict[str, str]], Optional[Union[PublicAccess, str]], **Any) -> Dict[str, Union[str, datetime]]
+ """
+ Creates a new container under the specified account. If a container
+ with the same name already exists, the operation fails.
+
+ :param metadata:
+ A dict with name-value pairs to associate with the
+ container as metadata. Example: {'Category': 'test'}
+ :type metadata: dict[str, str]
+ :param ~azure.storage.blob.PublicAccess public_access:
+ Possible values include: 'container', 'blob'.
+ :keyword container_encryption_scope: + Specifies the default encryption scope to set on the container and use for + all future writes. + + .. versionadded:: 12.2.0 + + :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A dictionary of response headers. + :rtype: Dict[str, Union[str, datetime]] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START create_container] + :end-before: [END create_container] + :language: python + :dedent: 16 + :caption: Creating a container to store blobs. + """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + timeout = kwargs.pop('timeout', None) + container_cpk_scope_info = get_container_cpk_scope_info(kwargs) + try: + return await self._client.container.create( # type: ignore + timeout=timeout, + access=public_access, + container_cpk_scope_info=container_cpk_scope_info, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def _rename_container(self, new_name, **kwargs): + # type: (str, **Any) -> ContainerClient + """Renames a container. + + Operation is successful only if the source container exists. + + :param str new_name: + The new container name the user wants to rename to. + :keyword lease: + Specify this to perform only if the lease ID given + matches the active lease ID of the source container. + :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: ~azure.storage.blob.ContainerClient + """ + lease = kwargs.pop('lease', None) + try: + kwargs['source_lease_id'] = lease.id + except AttributeError: + kwargs['source_lease_id'] = lease + try: + renamed_container = ContainerClient( + "{}://{}".format(self.scheme, self.primary_hostname), container_name=new_name, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) + await renamed_container._client.container.rename(self.container_name, **kwargs) # pylint: disable = protected-access + return renamed_container + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_container( + self, **kwargs): + # type: (Any) -> None + """ + Marks the specified container for deletion. The container and any blobs + contained within it are later deleted during garbage collection. + + :keyword lease: + If specified, delete_container only succeeds if the + container's lease is active and matches this ID. 
+ Required if the container has an active lease. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START delete_container] + :end-before: [END delete_container] + :language: python + :dedent: 16 + :caption: Delete a container. + """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + await self._client.container.delete( + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def acquire_lease( + self, lease_duration=-1, # type: int + lease_id=None, # type: Optional[str] + **kwargs): + # type: (...) -> BlobLeaseClient + """ + Requests a new lease. If the container does not have an active lease, + the Blob service creates a lease on the container and returns a new + lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :param str lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. 
+ If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A BlobLeaseClient object, that can be run in a context manager. + :rtype: ~azure.storage.blob.aio.BlobLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START acquire_lease_on_container] + :end-before: [END acquire_lease_on_container] + :language: python + :dedent: 12 + :caption: Acquiring a lease on the container. + """ + lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs) + return lease + + @distributed_trace_async + async def get_account_information(self, **kwargs): + # type: (**Any) -> Dict[str, str] + """Gets information related to the storage account. + + The information can also be retrieved if the user has a SAS to a container or blob. + The keys in the returned dictionary include 'sku_name' and 'account_kind'. + + :returns: A dict of account information (SKU and account type). + :rtype: dict(str, str) + """ + try: + return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_container_properties(self, **kwargs): + # type: (**Any) -> ContainerProperties + """Returns all user-defined metadata and system properties for the specified + container. The data returned does not include the container's list of blobs. + + :keyword lease: + If specified, get_container_properties only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :return: Properties for the specified container within a container object. + :rtype: ~azure.storage.blob.ContainerProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_properties] + :end-before: [END get_container_properties] + :language: python + :dedent: 16 + :caption: Getting properties on the container. 
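+
+ A minimal usage sketch (an editor's illustration rather than one of the shipped
+ samples; ``container_client`` is assumed to be an existing, authenticated
+ ``ContainerClient``)::
+
+     props = await container_client.get_container_properties()
+     print(props.name, props.last_modified, props.metadata)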
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response = await self._client.container.get_properties( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=deserialize_container_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + response.name = self.container_name + return response # type: ignore + + @distributed_trace_async + async def exists(self, **kwargs): + # type: (**Any) -> bool + """ + Returns True if a container exists and returns False otherwise. + + :kwarg int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: boolean + """ + try: + await self._client.container.get_properties(**kwargs) + return True + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceNotFoundError: + return False + + @distributed_trace_async + async def set_container_metadata( # type: ignore + self, metadata=None, # type: Optional[Dict[str, str]] + **kwargs + ): + # type: (...) -> Dict[str, Union[str, datetime]] + """Sets one or more user-defined name-value pairs for the specified + container. Each call to this operation replaces all existing metadata + attached to the container. To remove all metadata from the container, + call this operation with no metadata dict. + + :param metadata: + A dict containing name-value pairs to associate with the container as + metadata. Example: {'category':'test'} + :type metadata: dict[str, str] + :keyword lease: + If specified, set_container_metadata only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Container-updated property dict (Etag and last modified). + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_metadata] + :end-before: [END set_container_metadata] + :language: python + :dedent: 16 + :caption: Setting metadata on the container. 
+ """ + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + mod_conditions = get_modify_conditions(kwargs) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.container.set_metadata( # type: ignore + timeout=timeout, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def _get_blob_service_client(self): # pylint: disable=client-method-missing-kwargs + # type: (...) -> BlobServiceClient + """Get a client to interact with the container's parent service account. + + Defaults to current container's credentials. + + :returns: A BlobServiceClient. + :rtype: ~azure.storage.blob.BlobServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_service_async.py + :start-after: [START get_blob_service_client_from_container_client] + :end-before: [END get_blob_service_client_from_container_client] + :language: python + :dedent: 8 + :caption: Get blob service client from container object. + """ + from ._blob_service_client_async import BlobServiceClient + if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + else: + _pipeline = self._pipeline # pylint: disable = protected-access + return BlobServiceClient( + "{}://{}".format(self.scheme, self.primary_hostname), + credential=self._raw_credential, api_version=self.api_version, _configuration=self._config, + _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption, + encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key, + key_resolver_function=self.key_resolver_function, _pipeline=_pipeline) + + + @distributed_trace_async + async def get_container_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the specified container. + The permissions indicate whether container data may be accessed publicly. + + :keyword lease: + If specified, get_container_access_policy only succeeds if the + container's lease is active and matches this ID. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_container_access_policy] + :end-before: [END get_container_access_policy] + :language: python + :dedent: 16 + :caption: Getting the access policy on the container. 
+ """ + lease = kwargs.pop('lease', None) + access_conditions = get_access_conditions(lease) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.container.get_access_policy( + timeout=timeout, + lease_access_conditions=access_conditions, + cls=return_headers_and_deserialized, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('blob_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_container_access_policy( + self, signed_identifiers, # type: Dict[str, AccessPolicy] + public_access=None, # type: Optional[Union[str, PublicAccess]] + **kwargs # type: Any + ): # type: (...) -> Dict[str, Union[str, datetime]] + """Sets the permissions for the specified container or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether blobs in a container may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the container. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy] + :param ~azure.storage.blob.PublicAccess public_access: + Possible values include: 'container', 'blob'. + :keyword lease: + Required if the container has an active lease. Value can be a BlobLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified date/time. + :keyword ~datetime.datetime if_unmodified_since: + A datetime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Container-updated property dict (Etag and last modified). + :rtype: dict[str, str or ~datetime.datetime] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START set_container_access_policy] + :end-before: [END set_container_access_policy] + :language: python + :dedent: 16 + :caption: Setting access policy on the container. + """ + timeout = kwargs.pop('timeout', None) + lease = kwargs.pop('lease', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. 
The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore + signed_identifiers = identifiers # type: ignore + + mod_conditions = get_modify_conditions(kwargs) + access_conditions = get_access_conditions(lease) + try: + return await self._client.container.set_access_policy( + container_acl=signed_identifiers or None, + timeout=timeout, + access=public_access, + lease_access_conditions=access_conditions, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_blobs(self, name_starts_with=None, include=None, **kwargs): + # type: (Optional[str], Optional[Union[str, List[str]]], **Any) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. + :type include: list[str] or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START list_blobs_in_container] + :end-before: [END list_blobs_in_container] + :language: python + :dedent: 12 + :caption: List the blobs in the container. + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_flat_segment, + include=include, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobPropertiesPaged + ) + + @distributed_trace + def list_blob_names(self, **kwargs: Any) -> AsyncItemPaged[str]: + """Returns a generator to list the names of blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. + + Note that no additional properties or metadata will be returned when using this API. + Additionally this API does not have an option to include additional blobs such as snapshots, + versions, soft-deleted blobs, etc. To get any of this data, use :func:`list_blobs()`. + + :keyword str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) response of blob names as strings. + :rtype: ~azure.core.async_paging.AsyncItemPaged[str] + """ + name_starts_with = kwargs.pop('name_starts_with', None) + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + + # For listing only names we need to create a one-off generated client and + # override its deserializer to prevent deserialization of the full response. + client = self._build_generated_client() + client.container._deserialize = IgnoreListBlobsDeserializer() # pylint: disable=protected-access + + command = functools.partial( + client.container.list_blob_flat_segment, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + page_iterator_class=BlobNamesPaged) + + @distributed_trace + def walk_blobs( + self, name_starts_with=None, # type: Optional[str] + include=None, # type: Optional[Union[List[str], str]] + delimiter="/", # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> AsyncItemPaged[BlobProperties] + """Returns a generator to list the blobs under the specified container. + The generator will lazily follow the continuation tokens returned by + the service. This operation will list blobs in accordance with a hierarchy, + as delimited by the specified delimiter character. + + :param str name_starts_with: + Filters the results to return only blobs whose names + begin with the specified prefix. + :param include: + Specifies one or more additional datasets to include in the response. + Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions', + 'tags', 'versions', 'immutabilitypolicy', 'legalhold'. + :type include: list[str] or str + :param str delimiter: + When the request includes this parameter, the operation returns a BlobPrefix + element in the response body that acts as a placeholder for all blobs whose + names begin with the same substring up to the appearance of the delimiter + character. The delimiter may be a single character or a string. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) response of BlobProperties. + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties] + """ + if include and not isinstance(include, list): + include = [include] + + results_per_page = kwargs.pop('results_per_page', None) + timeout = kwargs.pop('timeout', None) + command = functools.partial( + self._client.container.list_blob_hierarchy_segment, + delimiter=delimiter, + include=include, + timeout=timeout, + **kwargs) + return BlobPrefix( + command, + prefix=name_starts_with, + results_per_page=results_per_page, + delimiter=delimiter) + + @distributed_trace + def find_blobs_by_tags( + self, filter_expression, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) 
-> AsyncItemPaged[FilteredBlob]
+ """Returns a generator to list the blobs under the specified container whose tags
+ match the given search expression.
+ The generator will lazily follow the continuation tokens returned by
+ the service.
+
+ :param str filter_expression:
+ The expression to find blobs whose tags match the specified condition.
+ e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+ :keyword int results_per_page:
+ The maximum number of results to retrieve per page when paginating.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: An iterable (auto-paging) response of FilteredBlob.
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
+ """
+ results_per_page = kwargs.pop('results_per_page', None)
+ timeout = kwargs.pop('timeout', None)
+ command = functools.partial(
+ self._client.container.filter_blobs,
+ timeout=timeout,
+ where=filter_expression,
+ **kwargs)
+ return AsyncItemPaged(
+ command, results_per_page=results_per_page,
+ page_iterator_class=FilteredBlobPaged)
+
+ @distributed_trace_async
+ async def upload_blob(
+ self, name: Union[str, BlobProperties],
+ data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+ blob_type: Union[str, BlobType] = BlobType.BlockBlob,
+ length: Optional[int] = None,
+ metadata: Optional[Dict[str, str]] = None,
+ **kwargs
+ ) -> BlobClient:
+ """Creates a new blob from a data source with automatic chunking.
+
+ :param name: The blob with which to interact. If specified, this value will override
+ a blob value specified in the blob URL.
+ :type name: str or ~azure.storage.blob.BlobProperties
+ :param data: The blob data to upload.
+ :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+ either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+ :param int length:
+ Number of bytes to read from the stream. This is optional, but
+ should be supplied for optimal performance.
+ :param metadata:
+ Name-value pairs associated with the blob as metadata.
+ :type metadata: dict(str, str)
+ :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+ If True, upload_blob will overwrite the existing data. If set to False, the
+ operation will fail with ResourceExistsError. The exception to the above is with Append
+ blob types: if set to False and the data already exists, an error will not be raised
+ and the data will be appended to the existing blob. If overwrite=True, then the existing
+ append blob will be deleted, and a new one created. Defaults to False.
+ :keyword ~azure.storage.blob.ContentSettings content_settings:
+ ContentSettings object used to set blob properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each chunk of the blob. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob.
+ Also note that if enabled, the memory-efficient upload algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the container has an active lease. Value can be a BlobLeaseClient object
+ or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_. This method may make multiple calls to the service and
+ the timeout will apply to each call individually.
+ :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+ A page blob tier value to set the blob to. The tier correlates to the size of the
+ blob and number of allowed IOPS. This is only applicable to page blobs on
+ premium storage accounts.
+ :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+ A standard blob tier value to set the blob to. For this version of the library,
+ this is only applicable to block blobs on standard storage accounts.
+ :keyword int maxsize_condition:
+ Optional conditional header. The max length in bytes permitted for
+ the append blob. If the Append Block operation would cause the blob
+ to exceed that limit or if the blob size is already greater than the
+ value specified in this header, the request will fail with
+ MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use when the blob size exceeds
+ 64MB.
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword str encryption_scope: + A predefined encryption scope used to encrypt the data on the service. An encryption + scope can be created using the Management API and referenced here by name. If a default + encryption scope has been defined at the container, this value will override it if the + container-level scope is configured to allow overrides. Otherwise an error will be raised. + + .. versionadded:: 12.2.0 + + :keyword str encoding: + Defaults to UTF-8. + :keyword progress_hook: + An async callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transfered + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]] + :returns: A BlobClient to interact with the newly uploaded blob. + :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START upload_blob_to_container] + :end-before: [END upload_blob_to_container] + :language: python + :dedent: 12 + :caption: Upload blob to the container. + """ + blob = self.get_blob_client(name) + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + await blob.upload_blob( + data, + blob_type=blob_type, + length=length, + metadata=metadata, + timeout=timeout, + encoding=encoding, + **kwargs + ) + return blob + + @distributed_trace_async + async def delete_blob( + self, blob, # type: Union[str, BlobProperties] + delete_snapshots=None, # type: Optional[str] + **kwargs + ): + # type: (...) -> None + """Marks the specified blob or snapshot for deletion. + + The blob is later deleted during garbage collection. + Note that in order to delete a blob, you must delete all of its + snapshots. You can delete both at the same time with the delete_blob + operation. + + If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot + and retains the blob or snapshot for specified number of days. + After specified number of days, blob's data is removed from the service during garbage collection. + Soft deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]` + Soft-deleted blob or snapshot can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()` + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str delete_snapshots: + Required if the blob has associated snapshots. Values include: + - "only": Deletes only the blobs snapshots. + - "include": Deletes the blob along with all snapshots. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to delete. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword lease: + Required if the blob has an active lease. Value can be a Lease object + or the lease ID as a string. + :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. 
+ If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + """ + blob = self.get_blob_client(blob) # type: ignore + kwargs.setdefault('merge_span', True) + timeout = kwargs.pop('timeout', None) + await blob.delete_blob( # type: ignore + delete_snapshots=delete_snapshots, + timeout=timeout, + **kwargs) + + @overload + async def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: str, + **kwargs) -> StorageStreamDownloader[str]: + ... + + @overload + async def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: None = None, + **kwargs) -> StorageStreamDownloader[bytes]: + ... + + @distributed_trace_async + async def download_blob( + self, blob: Union[str, BlobProperties], + offset: int = None, + length: int = None, + *, + encoding: Optional[str] = None, + **kwargs) -> StorageStreamDownloader: + """Downloads a blob to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the blob into + a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks. + + :param blob: The blob with which to interact. If specified, this value will override + a blob value specified in the blob URL. + :type blob: str or ~azure.storage.blob.BlobProperties + :param int offset: + Start of byte range to use for downloading a section of the blob. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword str version_id: + The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to download. + + .. versionadded:: 12.4.0 + This keyword argument was introduced in API version '2019-12-12'. + + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the blob. The storage + service checks the hash of the content that has arrived with the hash + that was sent. 
This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https, as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ blob. Also note that if enabled, the memory-efficient algorithm
+ will not be used, because computing the MD5 hash requires buffering
+ entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+ :keyword lease:
+ Required if the blob has an active lease. If specified, download_blob only
+ succeeds if the blob's lease is active and matches this ID. Value can be a
+ BlobLeaseClient object or the lease ID as a string.
+ :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str etag:
+ An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+ and act according to the condition specified by the `match_condition` parameter.
+ :keyword ~azure.core.MatchConditions match_condition:
+ The match condition to use upon the etag.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+ Encrypts the data on the service-side with the given key.
+ Use of customer-provided keys must be done over HTTPS.
+ As the encryption key itself is provided in the request,
+ a secure connection must be established to transfer the key.
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :keyword str encoding:
+ Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+ :keyword progress_hook:
+ An async callback to track the progress of a long running download. The signature is
+ function(current: int, total: int) where current is the number of bytes transferred
+ so far, and total is the total size of the download.
+ :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_. This method may make multiple calls to the service and
+ the timeout will apply to each call individually.
+ :returns: A streaming object.
(StorageStreamDownloader)
+ :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+ """
+ blob_client = self.get_blob_client(blob) # type: ignore
+ kwargs.setdefault('merge_span', True)
+ return await blob_client.download_blob(
+ offset=offset,
+ length=length,
+ encoding=encoding,
+ **kwargs)
+
+ @distributed_trace_async
+ async def delete_blobs(
+ self, *blobs: Union[str, Dict[str, Any], BlobProperties],
+ **kwargs: Any
+ ) -> AsyncIterator[AsyncHttpResponse]:
+ """Marks the specified blobs or snapshots for deletion.
+
+ The blobs are later deleted during garbage collection.
+ Note that in order to delete blobs, you must delete all of their
+ snapshots. You can delete both at the same time with the delete_blobs operation.
+
+ If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+ and retains the blobs or snapshots for the specified number of days.
+ After the specified number of days, the blobs' data is removed from the service during garbage collection.
+ Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` specifying `include=["deleted"]`.
+ Soft-deleted blobs or snapshots can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()`.
+
+ The maximum number of blobs that can be deleted in a single request is 256.
+
+ :param blobs:
+ The blobs to delete. This can be a single blob, or multiple values can
+ be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+ .. note::
+ When the blob type is dict, here's a list of keys and their value rules.
+
+ blob name:
+ key: 'name', value type: str
+ snapshot you want to delete:
+ key: 'snapshot', value type: str
+ whether to delete snapshots when deleting the blob:
+ key: 'delete_snapshots', value: 'include' or 'only'
+ whether the blob has been modified:
+ key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+ etag:
+ key: 'etag', value type: str
+ match the etag or not:
+ key: 'match_condition', value type: MatchConditions
+ tags match condition:
+ key: 'if_tags_match_condition', value type: str
+ lease:
+ key: 'lease_id', value type: Union[str, LeaseClient]
+ timeout for subrequest:
+ key: 'timeout', value type: int
+
+ :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+ :keyword str delete_snapshots:
+ Required if a blob has associated snapshots. Values include:
+ - "only": Deletes only the blob's snapshots.
+ - "include": Deletes the blob along with all snapshots.
+ :keyword ~datetime.datetime if_modified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only
+ if the resource has been modified since the specified time.
+ :keyword ~datetime.datetime if_unmodified_since:
+ A DateTime value. Azure expects the date value passed in to be UTC.
+ If timezone is included, any non-UTC datetimes will be converted to UTC.
+ If a date is passed in without timezone info, it is assumed to be UTC.
+ Specify this header to perform the operation only if
+ the resource has not been modified since the specified date/time.
+ :keyword str if_tags_match_condition:
+ Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+ e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+ .. versionadded:: 12.4.0
+
+ :keyword bool raise_on_any_failure:
+ This is a boolean param which defaults to True.
When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :return: An async iterator of responses, one for each blob in order + :rtype: asynciterator[~azure.core.pipeline.transport.AsyncHttpResponse] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common_async.py + :start-after: [START delete_multiple_blobs] + :end-before: [END delete_multiple_blobs] + :language: python + :dedent: 12 + :caption: Deleting multiple blobs. + """ + if len(blobs) == 0: + return iter(list()) + + reqs, options = self._generate_delete_blobs_options(*blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + @distributed_trace_async + async def set_standard_blob_tier_blobs( + self, standard_blob_tier: Union[str, 'StandardBlobTier'], + *blobs: Union[str, Dict[str, Any], BlobProperties], + **kwargs: Any + ) -> AsyncIterator[AsyncHttpResponse]: + """This operation sets the tier on block blobs. + + A block blob's tier determines Hot/Cool/Archive storage type. + This operation does not update the blob's ETag. + + The maximum number of blobs that can be updated in a single request is 256. + + :param standard_blob_tier: + Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool', + 'Archive'. The hot tier is optimized for storing data that is accessed + frequently. The cool storage tier is optimized for storing data that + is infrequently accessed and stored for at least a month. The archive + tier is optimized for storing data that is rarely accessed and stored + for at least six months with flexible latency requirements. + + .. note:: + If you want to set different tier on different blobs please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier + :param blobs: + The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys, value rules. + blob name: + key: 'name', value type: str + standard blob tier: + key: 'blob_tier', value type: StandardBlobTier + rehydrate priority: + key: 'rehydrate_priority', value type: RehydratePriority + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + tags match condition: + key: 'if_tags_match_condition', value type: str + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties + :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority: + Indicates the priority with which to rehydrate an archived blob + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. 
+ + @distributed_trace_async + async def set_premium_page_blob_tier_blobs( + self, premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'], + *blobs: Union[str, Dict[str, Any], BlobProperties], + **kwargs: Any + ) -> AsyncIterator[AsyncHttpResponse]: + """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts. + + The maximum number of blobs that can be updated in a single request is 256. + + :param premium_page_blob_tier: + A page blob tier value to set on all blobs. The tier correlates to the size of the + blob and number of allowed IOPS. This is only applicable to page blobs on + premium storage accounts. + + .. note:: + If you want to set a different tier on different blobs, please set this positional parameter to None. + Then the blob tier on every BlobProperties will be taken. + + :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier + :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can + be supplied, where each value is either the name of the blob (str) or BlobProperties. + + .. note:: + When the blob type is dict, here's a list of keys and value rules. + + blob name: + key: 'name', value type: str + premium blob tier: + key: 'blob_tier', value type: PremiumPageBlobTier + lease: + key: 'lease_id', value type: Union[str, LeaseClient] + timeout for subrequest: + key: 'timeout', value type: int + + :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword bool raise_on_any_failure: + This is a boolean param which defaults to True. When this is set, an exception + is raised even if there is a single operation failure. For optimal performance, + this should be set to False. + :return: An async iterator of responses, one for each blob in order + :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse] + """ + reqs, options = self._generate_set_tiers_options(premium_page_blob_tier, *blobs, **kwargs) + + return await self._batch_send(*reqs, **options) + + def get_blob_client( + self, blob, # type: Union[BlobProperties, str] + snapshot=None # type: str + ): + # type: (...) -> BlobClient + """Get a client to interact with the specified blob. + + The blob need not already exist. + + :param blob: + The blob with which to interact. + :type blob: str or ~azure.storage.blob.BlobProperties + :param str snapshot: + The optional blob snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`~BlobClient.create_snapshot()`. + :returns: A BlobClient.
+ :rtype: ~azure.storage.blob.aio.BlobClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_containers_async.py + :start-after: [START get_blob_client] + :end-before: [END get_blob_client] + :language: python + :dedent: 12 + :caption: Get the blob client. + """ + blob_name = _get_blob_name(blob) + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return BlobClient( + self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot, + credential=self.credential, api_version=self.api_version, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts, + require_encryption=self.require_encryption, encryption_version=self.encryption_version, + key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_download_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_download_async.py new file mode 100644 index 00000000000..eb3bd645d42 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_download_async.py @@ -0,0 +1,724 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import sys +import warnings +from io import BytesIO +from itertools import islice +from typing import AsyncIterator, Generic, IO, Optional, TypeVar + +import asyncio + +from azure.core.exceptions import HttpResponseError + +from .._shared.request_handlers import validate_and_format_range_headers +from .._shared.response_handlers import process_storage_error, parse_length_from_content_range +from .._deserialize import deserialize_blob_properties, get_page_ranges_result +from .._download import process_range_and_offset, _ChunkDownloader +from .._encryption import ( + adjust_blob_size_for_encryption, + decrypt_blob, + is_encryption_v2, + parse_encryption_data +) + +T = TypeVar('T', bytes, str) + + +async def process_content(data, start_offset, end_offset, encryption): + if data is None: + raise ValueError("Response cannot be None.") + content = data.response.body() + if encryption.get('key') is not None or encryption.get('resolver') is not None: + try: + return decrypt_blob( + encryption.get('required'), + encryption.get('key'), + encryption.get('resolver'), + content, + start_offset, + end_offset, + data.response.headers) + except Exception as error: + raise HttpResponseError( + message="Decryption failed.", + response=data.response, + error=error) + return content + + +class _AsyncChunkDownloader(_ChunkDownloader): + def __init__(self, **kwargs): + super(_AsyncChunkDownloader, self).__init__(**kwargs) + self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None + self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None + + async def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = await self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + await 
self._write_to_stream(chunk_data, chunk_start) + await self._update_progress(length) + + async def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return await self._download_chunk(chunk_start, chunk_end - 1) + + async def _update_progress(self, length): + if self.progress_lock: + async with self.progress_lock: # pylint: disable=not-async-context-manager + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + await self.progress_hook(self.progress_total, self.total_size) + + async def _write_to_stream(self, chunk_data, chunk_start): + if self.stream_lock: + async with self.stream_lock: # pylint: disable=not-async-context-manager + self.stream.seek(self.stream_start + (chunk_start - self.start_index)) + self.stream.write(chunk_data) + else: + self.stream.write(chunk_data) + + async def _download_chunk(self, chunk_start, chunk_end): + download_range, offset = process_range_and_offset( + chunk_start, chunk_end, chunk_end, self.encryption_options, self.encryption_data + ) + + # No need to download the empty chunk from server if there's no data in the chunk to be downloaded. + # Do optimize and create empty chunk locally if condition is met. + if self._do_optimize(download_range[0], download_range[1]): + chunk_data = b"\x00" * self.chunk_size + else: + range_header, range_validation = validate_and_format_range_headers( + download_range[0], + download_range[1], + check_content_md5=self.validate_content + ) + try: + _, response = await self.client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self.validate_content, + data_stream_total=self.total_size, + download_stream_current=self.progress_total, + **self.request_options + ) + + except HttpResponseError as error: + process_storage_error(error) + + chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options) + + + # This makes sure that if_match is set so that we can validate + # that subsequent downloads are to an unmodified blob + if self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = response.properties.etag + + return chunk_data + + +class _AsyncChunkIterator(object): + """Async iterator for chunks in blob download stream.""" + + def __init__(self, size, content, downloader, chunk_size): + self.size = size + self._chunk_size = chunk_size + self._current_content = content + self._iter_downloader = downloader + self._iter_chunks = None + self._complete = (size == 0) + + def __len__(self): + return self.size + + def __iter__(self): + raise TypeError("Async stream must be iterated asynchronously.") + + def __aiter__(self): + return self + + async def __anext__(self): + """Iterate through responses.""" + if self._complete: + raise StopAsyncIteration("Download complete") + if not self._iter_downloader: + # cut the data obtained from initial GET into chunks + if len(self._current_content) > self._chunk_size: + return self._get_chunk_data() + self._complete = True + return self._current_content + + if not self._iter_chunks: + self._iter_chunks = self._iter_downloader.get_chunk_offsets() + + # initial GET result still has more than _chunk_size bytes of data + if len(self._current_content) >= self._chunk_size: + return self._get_chunk_data() + + try: + chunk = next(self._iter_chunks) + self._current_content += await self._iter_downloader.yield_chunk(chunk) + except StopIteration: + self._complete = True + # it's 
likely that there is some data left in self._current_content + if self._current_content: + return self._current_content + raise StopAsyncIteration("Download complete") + + return self._get_chunk_data() + + def _get_chunk_data(self): + chunk_data = self._current_content[: self._chunk_size] + self._current_content = self._current_content[self._chunk_size:] + return chunk_data + + +class StorageStreamDownloader(Generic[T]): # pylint: disable=too-many-instance-attributes + """A streaming object to download from Azure Storage. + + :ivar str name: + The name of the blob being downloaded. + :ivar str container: + The name of the container where the blob is. + :ivar ~azure.storage.blob.BlobProperties properties: + The properties of the blob being downloaded. If only a range of the data is being + downloaded, this will be reflected in the properties. + :ivar int size: + The size of the total data in the stream. This will be the byte range if specified, + otherwise the total size of the blob. + """ + + def __init__( + self, + clients=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + encryption_options=None, + max_concurrency=1, + name=None, + container=None, + encoding=None, + download_cls=None, + **kwargs + ): + self.name = name + self.container = container + self.properties = None + self.size = None + + self._clients = clients + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._encryption_options = encryption_options or {} + self._progress_hook = kwargs.pop('progress_hook', None) + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._non_empty_ranges = None + self._response = None + self._encryption_data = None + self._offset = 0 + + self._initial_range = None + self._initial_offset = None + + # The cls is passed in via download_cls to avoid conflicting arg name with Generic.__new__ + # but needs to be changed to cls in the request options. + self._request_options['cls'] = download_cls + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + + def __len__(self): + return self.size + + async def _get_encryption_data_request(self): + # Save current request cls + download_cls = self._request_options.pop('cls', None) + # Adjust cls for get_properties + self._request_options['cls'] = deserialize_blob_properties + + properties = await self._clients.blob.get_properties(**self._request_options) + # This will return None if there is no encryption metadata or there are parsing errors. + # That is acceptable here, the proper error will be caught and surfaced when attempting + # to decrypt the blob.
+ self._encryption_data = parse_encryption_data(properties.metadata) + + # Restore cls for download + self._request_options['cls'] = download_cls + + async def _setup(self): + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + await self._get_encryption_data_request() + + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range, self._initial_offset = process_range_and_offset( + initial_request_start, + initial_request_end, + self._end_range, + self._encryption_options, + self._encryption_data + ) + + self._response = await self._initial_request() + + self.properties = self._response.properties + self.properties.name = self.name + self.properties.container = self.container + + # Set the content length to the download size instead of the size of + # the last range + initial_size = self._response.properties.size + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content( + self._response, + self._initial_offset[0], + self._initial_offset[1], + self._encryption_options + ) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + # For encryption V2, calculate based on size of decrypted content, not download size. + if is_encryption_v2(self._encryption_data): + self._download_complete = len(self._current_content) >= self.size + else: + self._download_complete = initial_size >= self.size + + if not self._download_complete and self._request_options.get("modified_access_conditions"): + self._request_options["modified_access_conditions"].if_match = self._response.properties.etag + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + try: + location_mode, response = await self._clients.blob.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+ self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._file_size is None: + raise ValueError("Required Content-Range response header is missing or malformed.") + # Remove any extra encryption data size from blob size + self._file_size = adjust_blob_size_for_encryption(self._file_size, self._encryption_data) + + if self._end_range is not None: + # Use the length unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response and error.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = await self._clients.blob.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # get page ranges to optimize downloading sparse page blob + if response.properties.blob_type == 'PageBlob': + try: + page_ranges = await self._clients.page_blob.get_page_ranges() + self._non_empty_ranges = get_page_ranges_result(page_ranges)[0] + except HttpResponseError: + pass + + return response + + def _get_downloader_start_with_offset(self): + # Start where the initial request download ended + start = self._initial_range[1] + 1 + # For encryption V2 only, adjust start to the end of the fetched data rather than download size + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + start = (self._start_range or 0) + len(self._current_content) + + # Adjust the start based on any data read past the current content + start += (self._offset - len(self._current_content)) + return start + + def chunks(self): + # type: () -> AsyncIterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: AsyncIterator[bytes] + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_hello_world_async.py + :start-after: [START download_a_blob_in_chunk] + :end-before: [END download_a_blob_in_chunk] + :language: python + :dedent: 16 + :caption: Download a blob using chunks(). 
+ """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + data_start = self._initial_range[1] + 1 # Start where the first download ended + # For encryption, adjust start to the end of the fetched data rather than download size + if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None: + data_start = (self._start_range or 0) + len(self._current_content) + + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _AsyncChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=data_start, + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + encryption_data=self._encryption_data, + use_location=self._location_mode, + **self._request_options) + return _AsyncChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + async def read(self, size: Optional[int] = -1) -> T: + """ + Read up to size bytes from the stream and return them. If size + is unspecified or is -1, all bytes will be read. + + :param size: + The number of bytes to download from the stream. Leave unspecified + or set to -1 to download all bytes. + :returns: + The requested data as bytes or a string if encoding was specified. If + the return value is empty, there is no more data to read. + :rtype: T + """ + if size == -1: + return await self.readall() + # Empty blob or already read to the end + if size == 0 or self._offset >= self.size: + return b'' if not self._encoding else '' + + stream = BytesIO() + remaining_size = size + + # Start by reading from current_content if there is data left + if self._offset < len(self._current_content): + start = self._offset + length = min(remaining_size, len(self._current_content) - self._offset) + read = stream.write(self._current_content[start:start + length]) + + remaining_size -= read + self._offset += read + if self._progress_hook: + await self._progress_hook(self._offset, self.size) + + if remaining_size > 0: + start_range = self._get_downloader_start_with_offset() + + # End is the min between the remaining size, the file size, and the end of the specified range + end_range = min(start_range + remaining_size, self._file_size) + if self._end_range is not None: + end_range = min(end_range, self._end_range + 1) + + parallel = self._max_concurrency > 1 + downloader = _AsyncChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._offset, + start_range=start_range, + end_range=end_range, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + encryption_data=self._encryption_data, + use_location=self._location_mode, + progress_hook=self._progress_hook, + **self._request_options + ) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + done, 
running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + done, _running_futures = await asyncio.wait(running_futures) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + + self._offset += remaining_size + + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + async def readall(self) -> T: + """ + Read the entire contents of this blob. + This operation is blocking until all data is downloaded. + + :returns: The requested data as bytes or a string if encoding was specified. + :rtype: T + """ + stream = BytesIO() + await self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + async def content_as_bytes(self, max_concurrency=1): + """DEPRECATED: Download the contents of this blob. + + This operation is blocking until all data is downloaded. + + This method is deprecated, use :func:`readall` instead. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return await self.readall() + + async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """DEPRECATED: Download the contents of this blob, and decode as text. + + This operation is blocking until all data is downloaded. + + This method is deprecated, use :func:`readall` instead. + + :param int max_concurrency: + The number of parallel connections with which to download. + :param str encoding: + Text encoding to decode the downloaded bytes. Default is UTF-8. + :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return await self.readall() + + async def readinto(self, stream: IO[T]) -> int: + """Download the contents of this blob to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # the stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable."
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # If some data has been streamed using `read`, only stream the remaining data + remaining_size = self.size - self._offset + # Already read to the end + if remaining_size <= 0: + return 0 + + # Write the content to the user stream if there is data left + if self._offset < len(self._current_content): + content = self._current_content[self._offset:] + stream.write(content) + self._offset += len(content) + if self._progress_hook: + await self._progress_hook(len(content), self.size) + + if self._download_complete: + return remaining_size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + data_start = self._get_downloader_start_with_offset() + + downloader = _AsyncChunkDownloader( + client=self._clients.blob, + non_empty_ranges=self._non_empty_ranges, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._offset, + start_range=data_start, + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + encryption_options=self._encryption_options, + encryption_data=self._encryption_data, + use_location=self._location_mode, + progress_hook=self._progress_hook, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + done, _running_futures = await asyncio.wait(running_futures) + try: + for task in done: + task.result() + except HttpResponseError as error: + process_storage_error(error) + + return remaining_size + + async def download_to_stream(self, stream, max_concurrency=1): + """DEPRECATED: Download the contents of this blob to a stream. + + This method is deprecated, use :func:`readinto` instead. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :param int max_concurrency: + The number of parallel connections with which to download. + :returns: The properties of the downloaded blob.
+ :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties
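To make the downloader surface above concrete, a hedged sketch of the three usual consumption patterns (the blob and file names are placeholders; `container` is the ContainerClient from the earlier sketches):

    blob = container.get_blob_client("artifacts/image.vhd")

    # Whole blob in memory:
    downloader = await blob.download_blob()   # StorageStreamDownloader
    data = await downloader.readall()

    # Chunk by chunk, without buffering the whole blob:
    downloader = await blob.download_blob()
    async for chunk in downloader.chunks():
        print(len(chunk))

    # Straight into a local file; the target must be seekable when
    # max_concurrency > 1, as readinto() enforces above.
    with open("image.vhd", "wb") as fh:
        downloader = await blob.download_blob(max_concurrency=4)
        await downloader.readinto(fh)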
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_lease_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_lease_async.py new file mode 100644 index 00000000000..7759f108d49 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_lease_async.py @@ -0,0 +1,345 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._serialize import get_modify_conditions +from .._lease import BlobLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + from .._generated.operations import BlobOperations, ContainerOperations + BlobClient = TypeVar("BlobClient") + ContainerClient = TypeVar("ContainerClient") + + +class BlobLeaseClient(LeaseClientBase): + """Creates a new BlobLeaseClient. + + This client provides lease operations on a BlobClient or ContainerClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the blob or container to lease. + :type client: ~azure.storage.blob.aio.BlobClient or + ~azure.storage.blob.aio.ContainerClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + @distributed_trace_async + async def acquire(self, lease_duration=-1, **kwargs): + # type: (int, Any) -> None + """Requests a new lease. + + If the container does not have an active lease, the Blob service creates a + lease on the container and returns a new lease ID. + + :param int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the lease. + + The lease can be renewed if the lease ID specified in the + lease client matches that associated with the container or blob. Note that + the lease may be renewed even if it has expired as long as the container + or blob has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag.
+ :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Release the lease. + + The lease may be released if the client lease id specified matches + that associated with the container or blob. Releasing the lease allows another client + to immediately acquire the lease for the container or blob as soon as the release is complete. + + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_.
+ :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """Change the lease ID of an active lease. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The Blob service returns 400 + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str etag: + An ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions match_condition: + The match condition to use upon the etag. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def break_lease(self, lease_break_period=None, **kwargs): + # type: (Optional[int], Any) -> int + """Break the lease, if the container or blob has an active lease. + + Once a lease is broken, it cannot be renewed. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. When a lease + is broken, the lease break period is allowed to elapse, during which time + no lease operation except break and release can be performed on the container or blob.
+ When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :param int lease_break_period: + This is the proposed duration of seconds that the lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the lease. If longer, the time remaining on the lease is used. + A new lease will not be available before the break period has + expired, but the lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration lease breaks after the remaining lease + period elapses, and an infinite lease breaks immediately. + :keyword ~datetime.datetime if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only + if the resource has been modified since the specified time. + :keyword ~datetime.datetime if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this header to perform the operation only if + the resource has not been modified since the specified date/time. + :keyword str if_tags_match_condition: + Specify a SQL where clause on blob tags to operate only on blob with a matching value. + eg. ``\"\\\"tagname\\\"='my tag'\"`` + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + mod_conditions = get_modify_conditions(kwargs) + try: + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + break_period=lease_break_period, + modified_access_conditions=mod_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore
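A sketch of the lease lifecycle this class implements, against the placeholder `blob` client from the previous sketch (acquire_lease on the blob client is assumed to return a BlobLeaseClient, as in the public SDK):

    # Acquire a 15-second lease; conflicting writes will now be rejected.
    lease = await blob.acquire_lease(lease_duration=15)
    try:
        await blob.upload_blob(b"payload", overwrite=True, lease=lease)
        await lease.renew()   # resets the lease duration clock
    finally:
        await lease.release()

    # BlobLeaseClient is also an async context manager; release happens on exit.
    async with await blob.acquire_lease(lease_duration=-1) as lease:
        await blob.upload_blob(b"payload", overwrite=True, lease=lease)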
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_list_blobs_helper.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_list_blobs_helper.py new file mode 100644 index 00000000000..8f3414f0977 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_list_blobs_helper.py @@ -0,0 +1,252 @@ +# pylint: disable=too-many-lines +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from urllib.parse import unquote + +from azure.core.async_paging import AsyncPageIterator, AsyncItemPaged +from azure.core.exceptions import HttpResponseError + +from .._deserialize import ( + get_blob_properties_from_generated_code, + load_many_xml_nodes, + load_xml_int, + load_xml_string, +) +from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix +from .._models import BlobProperties +from .._shared.models import DictMixin +from .._shared.response_handlers import ( + return_context_and_deserialized, + return_raw_deserialized, + process_storage_error, +) + + +class BlobPropertiesPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The container that the blobs are listed from. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'.
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + delimiter=None, + location_mode=None): + super(BlobPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.delimiter = delimiter + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.container = self._response.container_name + self.current_page = [self._build_item(item) for item in self._response.segment.blob_items] + + return self._response.next_marker or None, self.current_page + + def _build_item(self, item): + if isinstance(item, BlobProperties): + return item + if isinstance(item, BlobItemInternal): + blob = get_blob_properties_from_generated_code(item) # pylint: disable=protected-access + blob.container = self.container + return blob + return item + + +class BlobNamesPaged(AsyncPageIterator): + """An Iterable of Blob names. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(str) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__( + self, command, + container=None, + prefix=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(BlobNamesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + prefix=self.prefix, + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_raw_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.get('ServiceEndpoint') + self.prefix = load_xml_string(self._response, 'Prefix') + self.marker = load_xml_string(self._response, 'Marker') + self.results_per_page = load_xml_int(self._response, 'MaxResults') + self.container = self._response.get('ContainerName') + + blobs = load_many_xml_nodes(self._response, 'Blob', wrapper='Blobs') + self.current_page = [load_xml_string(blob, 'Name') for blob in blobs] + + next_marker = load_xml_string(self._response, 'NextMarker') + return next_marker or None, self.current_page + + +class BlobPrefix(AsyncItemPaged, DictMixin): + """An Iterable of Blob properties. + + Returned from walk_blobs when a delimiter is used. + Can be thought of as a virtual blob directory. + + :ivar str name: The prefix, or "directory name" of the blob. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token of the current page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :ivar str delimiter: A delimiting character used for hierarchy listing. + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only blobs whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str marker: An opaque continuation token. + :param str delimiter: + Used to capture blobs whose names begin with the same substring up to + the appearance of the delimiter character. The delimiter may be a single + character or a string. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
+ """ + def __init__(self, *args, **kwargs): + super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs) + self.name = kwargs.get('prefix') + self.prefix = kwargs.get('prefix') + self.results_per_page = kwargs.get('results_per_page') + self.container = kwargs.get('container') + self.delimiter = kwargs.get('delimiter') + self.location_mode = kwargs.get('location_mode') + + +class BlobPrefixPaged(BlobPropertiesPaged): + def __init__(self, *args, **kwargs): + super(BlobPrefixPaged, self).__init__(*args, **kwargs) + self.name = self.prefix + + async def _extract_data_cb(self, get_next_return): + continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return) + self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items + self.current_page = [self._build_item(item) for item in self.current_page] + self.delimiter = self._response.delimiter + + return continuation_token, self.current_page + + def _build_item(self, item): + item = super(BlobPrefixPaged, self)._build_item(item) + if isinstance(item, GenBlobPrefix): + if item.name.encoded: + name = unquote(item.name.content) + else: + name = item.name.content + return BlobPrefix( + self._command, + container=self.container, + prefix=name, + results_per_page=self.results_per_page, + location_mode=self.location_mode) + return item diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_models.py new file mode 100644 index 00000000000..dc8f96fdff1 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_models.py @@ -0,0 +1,178 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError +from .._deserialize import parse_tags + +from .._models import ContainerProperties, FilteredBlob, parse_page_list +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error + +from .._generated.models import FilterBlobItem + + +class ContainerPropertiesPaged(AsyncPageIterator): + """An Iterable of Container properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A container name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.models.ContainerProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only containers whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of container names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(ContainerPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [self._build_item(item) for item in self._response.container_items] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + return ContainerProperties._from_generated(item) # pylint: disable=protected-access + + +class FilteredBlobPaged(AsyncPageIterator): + """An Iterable of Blob properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A blob name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.blob.BlobProperties) + :ivar str container: The container that the blobs are listed from. + :param callable command: Function to retrieve the next page of items. + :param str container: The name of the container. + :param int results_per_page: The maximum number of blobs to retrieve per + call. + :param str continuation_token: An opaque continuation token. + :param location_mode: Specifies the location the request should be sent to. + This mode only applies for RA-GRS accounts which allow secondary read access. + Options include 'primary' or 'secondary'. 
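+
+    A minimal usage sketch (illustrative only; ``service_client`` is assumed to be an
+    existing aio ``BlobServiceClient`` and the tag filter expression is hypothetical)::
+
+        async for blob in service_client.find_blobs_by_tags("\"env\"='test'"):
+            print(blob.container_name, blob.name, blob.tags)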
+ """ + def __init__( + self, command, + container=None, + results_per_page=None, + continuation_token=None, + location_mode=None): + super(FilteredBlobPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.marker = continuation_token + self.results_per_page = results_per_page + self.container = container + self.current_page = None + self.location_mode = location_mode + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.marker = self._response.next_marker + self.current_page = [self._build_item(item) for item in self._response.blobs] + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_item(item): + if isinstance(item, FilterBlobItem): + tags = parse_tags(item.tags) + blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags) + return blob + return item + + +class PageRangePaged(AsyncPageIterator): + def __init__(self, command, results_per_page=None, continuation_token=None): + super(PageRangePaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.current_page = self._build_page(self._response) + + return self._response.next_marker or None, self.current_page + + @staticmethod + def _build_page(response): + if not response: + raise StopIteration + + return parse_page_list(response) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_upload_helpers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_upload_helpers.py new file mode 100644 index 00000000000..fb42e700a4d --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/aio/_upload_helpers.py @@ -0,0 +1,328 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import inspect +from io import SEEK_SET, UnsupportedOperation +from typing import TypeVar, TYPE_CHECKING + +from azure.core.exceptions import ResourceModifiedError, HttpResponseError + +from .._shared.response_handlers import process_storage_error, return_response_headers +from .._shared.uploads_async import ( + upload_data_chunks, + upload_substream_blocks, + BlockBlobChunkUploader, + PageBlobChunkUploader, + AppendBlobChunkUploader +) +from .._generated.models import ( + BlockLookupList, + AppendPositionAccessConditions, + ModifiedAccessConditions, +) +from .._encryption import ( + GCMBlobEncryptionStream, + encrypt_blob, + get_adjusted_upload_size, + get_blob_encryptor_and_padder, + generate_blob_encryption_data, + _ENCRYPTION_PROTOCOL_V1, + _ENCRYPTION_PROTOCOL_V2 +) +from .._upload_helpers import _convert_mod_error, _any_conditions + +if TYPE_CHECKING: + BlobLeaseClient = TypeVar("BlobLeaseClient") + + +async def upload_block_blob( # pylint: disable=too-many-locals, too-many-statements + client=None, + data=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if not overwrite and not _any_conditions(**kwargs): + kwargs['modified_access_conditions'].if_none_match = '*' + adjusted_count = length + if (encryption_options.get('key') is not None) and (adjusted_count is not None): + adjusted_count = get_adjusted_upload_size(adjusted_count, encryption_options['version']) + blob_headers = kwargs.pop('blob_headers', None) + tier = kwargs.pop('standard_blob_tier', None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + + immutability_policy = kwargs.pop('immutability_policy', None) + immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time + immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode + legal_hold = kwargs.pop('legal_hold', None) + progress_hook = kwargs.pop('progress_hook', None) + + # Do single put if the size is smaller than config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): + try: + data = data.read(length) + if inspect.isawaitable(data): + data = await data + if not isinstance(data, bytes): + raise TypeError('Blob data should be of type bytes.') + except AttributeError: + pass + if encryption_options.get('key'): + encryption_data, data = encrypt_blob(data, encryption_options['key'], encryption_options['version']) + headers['x-ms-meta-encryptiondata'] = encryption_data + response = await client.upload( + body=data, + content_length=adjusted_count, + blob_http_headers=blob_headers, + headers=headers, + cls=return_response_headers, + validate_content=validate_content, + data_stream_total=adjusted_count, + upload_stream_current=0, + tier=tier.value if tier else None, + blob_tags_string=blob_tags_string, + immutability_policy_expiry=immutability_policy_expiry, + immutability_policy_mode=immutability_policy_mode, + legal_hold=legal_hold, + **kwargs) + + if progress_hook: + await progress_hook(adjusted_count, adjusted_count) + + return response + + use_original_upload_path = blob_settings.use_byte_buffer or \ + validate_content or encryption_options.get('required') or \ + blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \ + hasattr(stream, 
'seekable') and not stream.seekable() or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            total_size = length
+            encryptor, padder = None, None
+            if encryption_options and encryption_options.get('key'):
+                cek, iv, encryption_data = generate_blob_encryption_data(
+                    encryption_options['key'],
+                    encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_data
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                    encryptor, padder = get_blob_encryptor_and_padder(cek, iv, True)
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V2:
+                    # Adjust total_size for encryption V2
+                    total_size = adjusted_count
+                    # V2 wraps the data stream with an encryption stream
+                    stream = GCMBlobEncryptionStream(cek, stream)
+
+            block_ids = await upload_data_chunks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=total_size,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                encryptor=encryptor,
+                padder=padder,
+                headers=headers,
+                **kwargs
+            )
+        else:
+            block_ids = await upload_substream_blocks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs
+            )
+
+        # Commit the uploaded blocks to finalize the blob.
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        block_lookup.latest = block_ids
+        return await client.commit_block_list(
+            block_lookup,
+            blob_http_headers=blob_headers,
+            cls=return_response_headers,
+            validate_content=validate_content,
+            headers=headers,
+            tier=tier.value if tier else None,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            **kwargs)
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
+
+async def upload_page_blob(
+        client=None,
+        stream=None,
+        length=None,
+        overwrite=None,
+        headers=None,
+        validate_content=None,
+        max_concurrency=None,
+        blob_settings=None,
+        encryption_options=None,
+        **kwargs):
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        if length is None or length < 0:
+            raise ValueError("A content length must be specified for a Page Blob.")
+        if length % 512 != 0:
+            raise ValueError("Invalid page blob size: {0}.
" + "The size must be aligned to a 512-byte boundary.".format(length)) + tier = None + if kwargs.get('premium_page_blob_tier'): + premium_page_blob_tier = kwargs.pop('premium_page_blob_tier') + try: + tier = premium_page_blob_tier.value + except AttributeError: + tier = premium_page_blob_tier + + if encryption_options and encryption_options.get('key'): + cek, iv, encryption_data = generate_blob_encryption_data( + encryption_options['key'], + encryption_options['version']) + headers['x-ms-meta-encryptiondata'] = encryption_data + + blob_tags_string = kwargs.pop('blob_tags_string', None) + progress_hook = kwargs.pop('progress_hook', None) + + response = await client.create( + content_length=0, + blob_content_length=length, + blob_sequence_number=None, + blob_http_headers=kwargs.pop('blob_headers', None), + blob_tags_string=blob_tags_string, + tier=tier, + cls=return_response_headers, + headers=headers, + **kwargs) + if length == 0: + return response + + if encryption_options and encryption_options.get('key'): + if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1: + encryptor, padder = get_blob_encryptor_and_padder(cek, iv, False) + kwargs['encryptor'] = encryptor + kwargs['padder'] = padder + + kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag']) + return await upload_data_chunks( + service=client, + uploader_class=PageBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_page_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + progress_hook=progress_hook, + headers=headers, + **kwargs) + + except HttpResponseError as error: + try: + process_storage_error(error) + except ResourceModifiedError as mod_error: + if not overwrite: + _convert_mod_error(mod_error) + raise + + +async def upload_append_blob( # pylint: disable=unused-argument + client=None, + stream=None, + length=None, + overwrite=None, + headers=None, + validate_content=None, + max_concurrency=None, + blob_settings=None, + encryption_options=None, + **kwargs): + try: + if length == 0: + return {} + blob_headers = kwargs.pop('blob_headers', None) + append_conditions = AppendPositionAccessConditions( + max_size=kwargs.pop('maxsize_condition', None), + append_position=None) + blob_tags_string = kwargs.pop('blob_tags_string', None) + progress_hook = kwargs.pop('progress_hook', None) + + try: + if overwrite: + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + progress_hook=progress_hook, + headers=headers, + **kwargs) + except HttpResponseError as error: + if error.response.status_code != 404: + raise + # rewind the request body if it is a stream + if hasattr(stream, 'read'): + try: + # attempt to rewind the body to the initial position + stream.seek(0, SEEK_SET) + except UnsupportedOperation: + # if body is not seekable, then retry would not work + raise error + await client.create( + content_length=0, + blob_http_headers=blob_headers, + headers=headers, + blob_tags_string=blob_tags_string, + **kwargs) + return await upload_data_chunks( + service=client, + uploader_class=AppendBlobChunkUploader, + total_size=length, + 
chunk_size=blob_settings.max_block_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + append_position_access_conditions=append_conditions, + progress_hook=progress_hook, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/py.typed b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/blob/v2022_11_02/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/__init__.py new file mode 100644 index 00000000000..849489fca33 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/__init__.py @@ -0,0 +1 @@ +__import__('pkg_resources').declare_namespace(__name__) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/__init__.py new file mode 100644 index 00000000000..94734d2b383 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/__init__.py @@ -0,0 +1,97 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import warnings + +from ._version import VERSION +from ._file_client import ShareFileClient +from ._directory_client import ShareDirectoryClient +from ._share_client import ShareClient +from ._share_service_client import ShareServiceClient +from ._lease import ShareLeaseClient +from ._shared_access_signature import generate_account_sas, generate_share_sas, generate_file_sas +from ._shared.policies import ExponentialRetry, LinearRetry +from ._shared.models import ( + LocationMode, + ResourceTypes, + AccountSasPermissions, + StorageErrorCode) +from ._models import ( + ShareProperties, + DirectoryProperties, + Handle, + FileProperties, + Metrics, + RetentionPolicy, + CorsRule, + ShareSmbSettings, + SmbMultichannel, + ShareProtocolSettings, + ShareProtocols, + AccessPolicy, + FileSasPermissions, + ShareSasPermissions, + ContentSettings, + NTFSAttributes) +from ._generated.models import ( + ShareAccessTier, + ShareRootSquash +) + +__version__ = VERSION + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', + 'ExponentialRetry', + 'LinearRetry', + 'LocationMode', + 'ResourceTypes', + 'AccountSasPermissions', + 'StorageErrorCode', + 'Metrics', + 'RetentionPolicy', + 'CorsRule', + 'ShareSmbSettings', + 'ShareAccessTier', + 'SmbMultichannel', + 'ShareProtocolSettings', + 'AccessPolicy', + 'FileSasPermissions', + 'ShareSasPermissions', + 'ShareProtocols', + 'ShareProperties', + 'DirectoryProperties', + 'FileProperties', + 'ContentSettings', + 'Handle', + 'NTFSAttributes', + 'ShareRootSquash', + 'generate_account_sas', + 'generate_share_sas', + 'generate_file_sas' +] + + +def __getattr__(name): + """ + This function is added to deal with HandleItem which is a generated model that + was mistakenly added to the module exports. 
It has been removed from the module imports and __all__
+    to prevent it from showing in intellisense/docs, but we handle it here to prevent
+    breaking any existing code which may have imported it.
+    """
+    if name == 'HandleItem':
+        from ._generated.models import HandleItem
+        warnings.warn(
+            "HandleItem is deprecated and should not be used. Use Handle instead.",
+            DeprecationWarning
+        )
+        return HandleItem
+
+    raise AttributeError(f"module 'azure.storage.fileshare' has no attribute {name}")
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_deserialize.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_deserialize.py
new file mode 100644
index 00000000000..6839469f803
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_deserialize.py
@@ -0,0 +1,83 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import (  # pylint: disable=unused-import
+    Tuple, Dict, List,
+    TYPE_CHECKING
+)
+
+from ._models import ShareProperties, DirectoryProperties, FileProperties
+from ._shared.response_handlers import deserialize_metadata
+from ._generated.models import ShareFileRangeList
+
+
+def deserialize_share_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    share_properties = ShareProperties(
+        metadata=metadata,
+        **headers
+    )
+    return share_properties
+
+
+def deserialize_directory_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    directory_properties = DirectoryProperties(
+        metadata=metadata,
+        **headers
+    )
+    return directory_properties
+
+
+def deserialize_file_properties(response, obj, headers):
+    metadata = deserialize_metadata(response, obj, headers)
+    file_properties = FileProperties(
+        metadata=metadata,
+        **headers
+    )
+    if 'Content-Range' in headers:
+        if 'x-ms-content-md5' in headers:
+            file_properties.content_settings.content_md5 = headers['x-ms-content-md5']
+        else:
+            file_properties.content_settings.content_md5 = None
+    return file_properties
+
+
+def deserialize_file_stream(response, obj, headers):
+    file_properties = deserialize_file_properties(response, obj, headers)
+    obj.properties = file_properties
+    return response.http_response.location_mode, obj
+
+
+def deserialize_permission(response, obj, headers):  # pylint: disable=unused-argument
+    '''
+    Extracts out file permission
+    '''
+
+    return obj.permission
+
+
+def deserialize_permission_key(response, obj, headers):  # pylint: disable=unused-argument
+    '''
+    Extracts out file permission key
+    '''
+
+    if response is None or headers is None:
+        return None
+    return headers.get('x-ms-file-permission-key', None)
+
+
+def get_file_ranges_result(ranges):
+    # type: (ShareFileRangeList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+    file_ranges = []  # type: ignore
+    clear_ranges = []  # type: List
+    if ranges.ranges:
+        file_ranges = [
+            {'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]  # type: ignore
+    if ranges.clear_ranges:
+        clear_ranges = [
+            {'start': clear_range.start, 'end': clear_range.end} for clear_range in ranges.clear_ranges]
+    return file_ranges, clear_ranges
diff --git
a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_directory_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_directory_client.py new file mode 100644 index 00000000000..2e9e8eb4444 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_directory_client.py @@ -0,0 +1,991 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import functools +import sys +import time +from datetime import datetime +from typing import ( + Any, AnyStr, Dict, IO, Iterable, Optional, Union, + TYPE_CHECKING +) +from urllib.parse import urlparse, quote, unquote + +from typing_extensions import Self + +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.paging import ItemPaged +from azure.core.pipeline import Pipeline +from azure.core.tracing.decorator import distributed_trace +from ._generated import AzureFileStorage +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._deserialize import deserialize_directory_properties +from ._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties +from ._file_client import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from ._models import DirectoryProperties, Handle, NTFSAttributes + + +class ShareDirectoryClient(StorageAccountHostsMixin): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. 
The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( + self, account_url: str, + share_name: str, + directory_path: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.directory_path = directory_path + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareDirectoryClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + 
allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @classmethod + def from_directory_url( + cls, directory_url: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create a ShareDirectoryClient from a directory url. + + :param str directory_url: + The full URI to the directory. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + try: + if not directory_url.lower().startswith('http'): + directory_url = "https://" + directory_url + except AttributeError: + raise ValueError("Directory URL must be a string.") + parsed_url = urlparse(directory_url.rstrip('/')) + if not parsed_url.path and not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(directory_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + path_snapshot, _ = parse_query(parsed_url.query) + + share_name, _, path_dir = parsed_url.path.lstrip('/').partition('/') + share_name = unquote(share_name) + + directory_path = path_dir + snapshot = snapshot or path_snapshot + + return cls( + account_url=account_url, share_name=share_name, directory_path=directory_path, + credential=credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, str): + share_name = share_name.encode('UTF-8') + directory_path = "" + if self.directory_path: + directory_path = "/" + quote(self.directory_path, safe='~') + return "{}://{}/{}{}{}".format( + self.scheme, + hostname, + quote(share_name), + directory_path, + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str: str, + share_name: str, + directory_path: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ShareDirectoryClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str directory_path: + The directory path. + :param credential: + The credentials with which to authenticate. 
This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A directory client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, directory_path=directory_path, credential=credential, **kwargs) + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param file_name: + The name of the file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, + credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 12 + :caption: Gets the subdirectory client. 
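+
+        A minimal inline sketch (illustrative only; ``parent_dir`` is assumed to be an
+        existing ``ShareDirectoryClient``)::
+
+            child_dir = parent_dir.get_subdirectory_client("logs")
+            print(child_dir.directory_path)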
+        """
+        directory_path = self.directory_path.rstrip('/') + "/" + directory_name
+
+        _pipeline = Pipeline(
+            transport=TransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ShareDirectoryClient(
+            self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot,
+            credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version,
+            _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline,
+            _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, **kwargs)
+
+    @distributed_trace
+    def create_directory(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Creates a new directory under the directory referenced by the client.
+
+        :keyword file_attributes:
+            The file system attributes for files and directories.
+            If not set, the default value would be "none" and the attributes will be set to "Archive".
+            Here is an example for when the var type is str: 'Temporary|Archive'.
+            file_attributes value is not case sensitive.
+        :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+        :keyword file_creation_time:
+            Creation time for the directory. Default value: "now".
+        :paramtype file_creation_time: str or ~datetime.datetime
+        :keyword file_last_write_time:
+            Last write time for the directory. Default value: "now".
+        :paramtype file_last_write_time: str or ~datetime.datetime
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set
+            for the directory/file. This header can be used if Permission size is
+            <= 8KB, else file-permission-key header shall be used.
+            Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword str file_permission_key:
+            Key of the permission to be set for the directory/file.
+            Note: Only one of the file-permission or file-permission-key should be specified.
+        :keyword file_change_time:
+            Change time for the directory. If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the directory as metadata.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START create_directory]
+                :end-before: [END create_directory]
+                :language: python
+                :dedent: 12
+                :caption: Creates a directory.
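+
+        A minimal inline sketch (illustrative only; ``directory_client`` is assumed to
+        be an existing ``ShareDirectoryClient`` and the metadata values are hypothetical)::
+
+            props = directory_client.create_directory(metadata={"owner": "aosm"})
+            print(props["etag"], props["last_modified"])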
+        """
+        timeout = kwargs.pop('timeout', None)
+        metadata = kwargs.pop('metadata', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))  # type: ignore
+
+        file_attributes = kwargs.pop('file_attributes', 'none')
+        file_creation_time = kwargs.pop('file_creation_time', 'now')
+        file_last_write_time = kwargs.pop('file_last_write_time', 'now')
+        file_change_time = kwargs.pop('file_change_time', None)
+        file_permission = kwargs.pop('file_permission', None)
+        file_permission_key = kwargs.pop('file_permission_key', None)
+        file_permission = _get_file_permission(file_permission, file_permission_key, 'inherit')
+
+        try:
+            return self._client.directory.create(  # type: ignore
+                file_attributes=str(file_attributes),
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=file_permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def delete_directory(self, **kwargs):
+        # type: (**Any) -> None
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START delete_directory]
+                :end-before: [END delete_directory]
+                :language: python
+                :dedent: 12
+                :caption: Deletes a directory.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            self._client.directory.delete(timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def rename_directory(
+            self, new_name,  # type: str
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> ShareDirectoryClient
+        """
+        Rename the source directory.
+
+        :param str new_name:
+            The new directory name.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :keyword bool overwrite:
+            A boolean value indicating whether this request will overwrite the destination
+            file if it already exists. If true, the rename will succeed and will overwrite the
+            destination file. If not provided or if false and the destination file does exist, the
+            request will not overwrite the destination file. If provided and the destination file
+            doesn't exist, the rename will succeed.
+        :keyword bool ignore_read_only:
+            A boolean value that specifies whether the ReadOnly attribute on a preexisting destination
+            file should be respected. If true, the rename will succeed, otherwise, a previous file at the
+            destination with the ReadOnly attribute set will cause the rename to fail.
+        :keyword str file_permission:
+            If specified the permission (security descriptor) shall be set for the directory.
This header + can be used if Permission size is <= 8KB, else file_permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + A value of 'preserve' can be passed to preserve source permissions. + Note: Only one of the file_permission or file_permission_key should be specified. + :keyword str file_permission_key: + Key of the permission to be set for the directory. + Note: Only one of the file-permission or file-permission-key should be specified. + :keyword file_attributes: + The file system attributes for the directory. + :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str + :keyword file_creation_time: + Creation time for the directory. + :paramtype file_creation_time:~datetime.datetime or str + :keyword file_last_write_time: + Last write time for the file. + :paramtype file_last_write_time:~datetime.datetime or str + :keyword file_change_time: + Change time for the directory. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword Dict[str,str] metadata: + A name-value pair to associate with a file storage object. + :keyword destination_lease: + Required if the destination file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :returns: The new Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + if not new_name: + raise ValueError("Please specify a new directory name.") + + new_name = new_name.strip('/') + new_path_and_query = new_name.split('?') + new_dir_path = new_path_and_query[0] + if len(new_path_and_query) == 2: + new_dir_sas = new_path_and_query[1] or self._query_str.strip('?') + else: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = ShareDirectoryClient( + '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_dir_path, + credential=new_dir_sas or self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent + ) + + kwargs.update(get_rename_smb_properties(kwargs)) + + timeout = kwargs.pop('timeout', None) + overwrite = kwargs.pop('overwrite', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + + destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) + + try: + new_directory_client._client.directory.rename( # pylint: disable=protected-access + self.url, + timeout=timeout, + replace_if_exists=overwrite, + destination_lease_access_conditions=destination_access_conditions, + headers=headers, + **kwargs) + + return new_directory_client + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], **Any) -> ItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. 
+        :keyword list[str] include:
+            Include this parameter to specify one or more datasets to include in the response.
+            Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey".
+
+            .. versionadded:: 12.6.0
+                This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword bool include_extended_info:
+            If this is set to true, file id will be returned in listed results.
+
+            .. versionadded:: 12.6.0
+                This keyword argument was introduced in API version '2020-10-02'.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties
+        :rtype: ~azure.core.paging.ItemPaged[DirectoryProperties and FileProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START lists_directory]
+                :end-before: [END lists_directory]
+                :language: python
+                :dedent: 12
+                :caption: List directories and files.
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_files_and_directories_segment,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            **kwargs)
+        return ItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=DirectoryPropertiesPaged)
+
+    @distributed_trace
+    def list_handles(self, recursive=False, **kwargs):
+        # type: (bool, Any) -> ItemPaged
+        """Lists opened handles on a directory or a file under the directory.
+
+        :param bool recursive:
+            Boolean that specifies if operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An auto-paging iterable of Handle
+        :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.Handle]
+        """
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.directory.list_handles,
+            sharesnapshot=self.snapshot,
+            timeout=timeout,
+            recursive=recursive,
+            **kwargs)
+        return ItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=HandlesPaged)
+
+    @distributed_trace
+    def close_handle(self, handle, **kwargs):
+        # type: (Union[str, Handle], Any) -> Dict[str, int]
+        """Close an open file handle.
+
+        :param handle:
+            A specific handle to close.
+        :type handle: str or ~azure.storage.fileshare.Handle
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns:
+            The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles failed to close in a dict.
+        :rtype: dict[str, int]
+        """
+        try:
+            handle_id = handle.id  # type: ignore
+        except AttributeError:
+            handle_id = handle
+        if handle_id == '*':
+            raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+        try:
+            response = self._client.directory.force_close_handles(
+                handle_id,
+                marker=None,
+                recursive=None,
+                sharesnapshot=self.snapshot,
+                cls=return_response_headers,
+                **kwargs
+            )
+            return {
+                'closed_handles_count': response.get('number_of_handles_closed', 0),
+                'failed_handles_count': response.get('number_of_handles_failed', 0)
+            }
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def close_all_handles(self, recursive=False, **kwargs):
+        # type: (bool, Any) -> Dict[str, int]
+        """Close any open file handles.
+
+        This operation will block until the service has closed all open handles.
+
+        :param bool recursive:
+            Boolean that specifies if operation should apply to the directory specified by the client,
+            its files, its subdirectories and their files. Default value is False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+            and the number of handles failed to close in a dict.
+        :rtype: dict[str, int]
+        """
+        timeout = kwargs.pop('timeout', None)
+        start_time = time.time()
+
+        try_close = True
+        continuation_token = None
+        total_closed = 0
+        total_failed = 0
+        while try_close:
+            try:
+                response = self._client.directory.force_close_handles(
+                    handle_id='*',
+                    timeout=timeout,
+                    marker=continuation_token,
+                    recursive=recursive,
+                    sharesnapshot=self.snapshot,
+                    cls=return_response_headers,
+                    **kwargs
+                )
+            except HttpResponseError as error:
+                process_storage_error(error)
+            continuation_token = response.get('marker')
+            try_close = bool(continuation_token)
+            total_closed += response.get('number_of_handles_closed', 0)
+            total_failed += response.get('number_of_handles_failed', 0)
+            if timeout:
+                timeout = max(0, timeout - (time.time() - start_time))
+        return {
+            'closed_handles_count': total_closed,
+            'failed_handles_count': total_failed
+        }
+
+    @distributed_trace
+    def get_directory_properties(self, **kwargs):
+        # type: (Any) -> DirectoryProperties
+        """Returns all user-defined metadata and system properties for the
+        specified directory. The data returned does not include the directory's
+        list of files.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: DirectoryProperties
+        :rtype: ~azure.storage.fileshare.DirectoryProperties
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = self._client.directory.get_properties(
+                timeout=timeout,
+                cls=deserialize_directory_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response  # type: ignore
+
+    @distributed_trace
+    def set_directory_metadata(self, metadata, **kwargs):
+        # type: (Dict[str, Any], Any) -> Dict[str, Any]
+        """Sets the metadata for the directory.
+
+        Each call to this operation replaces all existing metadata
+        attached to the directory. To remove all metadata from the directory,
+        call this operation with an empty metadata dict.
+
+        :param metadata:
+            Name-value pairs associated with the directory as metadata.
+        :type metadata: dict(str, str)
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: Directory-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        timeout = kwargs.pop('timeout', None)
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        try:
+            return self._client.directory.set_metadata(  # type: ignore
+                timeout=timeout,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def exists(self, **kwargs):
+        # type: (**Any) -> bool
+        """
+        Returns True if a directory exists and returns False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: True if the directory exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            self._client.directory.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace
+    def set_http_headers(self, file_attributes="none",  # type: Union[str, NTFSAttributes]
+                         file_creation_time="preserve",  # type: Optional[Union[str, datetime]]
+                         file_last_write_time="preserve",  # type: Optional[Union[str, datetime]]
+                         file_permission=None,  # type: Optional[str]
+                         permission_key=None,  # type: Optional[str]
+                         **kwargs  # type: Any
+                         ):
+        # type: (...) -> Dict[str, Any]
+        """Sets HTTP headers on the directory.
+
+        :param file_attributes:
+            The file system attributes for files and directories.
+            If not set, indicates preservation of existing values.
+            Here is an example for when the var type is str: 'Temporary|Archive'
+        :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+        :param file_creation_time: Creation time for the file.
+            Default value: Preserve.
+        :type file_creation_time: str or datetime
+        :param file_last_write_time: Last write time for the file.
+            Default value: Preserve.
+        :type file_last_write_time: str or datetime
+        :param file_permission: If specified the permission (security
+            descriptor) shall be set for the directory/file. This header can be
+            used if Permission size is <= 8KB, else x-ms-file-permission-key
+            header shall be used. Default value: Inherit. If SDDL is specified as
+            input, it must have owner, group and dacl. Note: Only one of the
+            x-ms-file-permission or x-ms-file-permission-key should be specified.
+        :type file_permission: str
+        :param permission_key: Key of the permission to be set for the
+            directory/file. Note: Only one of the x-ms-file-permission or
+            x-ms-file-permission-key should be specified.
+        :type permission_key: str
+        :keyword file_change_time:
+            Change time for the directory.
If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        timeout = kwargs.pop('timeout', None)
+        file_permission = _get_file_permission(file_permission, permission_key, 'preserve')
+        file_change_time = kwargs.pop('file_change_time', None)
+        try:
+            return self._client.directory.set_properties(  # type: ignore
+                file_attributes=_str(file_attributes),
+                file_creation_time=_datetime_to_str(file_creation_time),
+                file_last_write_time=_datetime_to_str(file_last_write_time),
+                file_change_time=_datetime_to_str(file_change_time),
+                file_permission=file_permission,
+                file_permission_key=permission_key,
+                timeout=timeout,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def create_subdirectory(
+            self, directory_name,  # type: str
+            **kwargs):
+        # type: (...) -> ShareDirectoryClient
+        """Creates a new subdirectory and returns a client to interact
+        with the subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the subdirectory as metadata.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.ShareDirectoryClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START create_subdirectory]
+                :end-before: [END create_subdirectory]
+                :language: python
+                :dedent: 12
+                :caption: Create a subdirectory.
+        """
+        metadata = kwargs.pop('metadata', None)
+        timeout = kwargs.pop('timeout', None)
+        subdir = self.get_subdirectory_client(directory_name)
+        subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs)
+        return subdir  # type: ignore
+
+    @distributed_trace
+    def delete_subdirectory(
+            self, directory_name,  # type: str
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Deletes a subdirectory.
+
+        :param str directory_name:
+            The name of the subdirectory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_directory.py
+                :start-after: [START delete_subdirectory]
+                :end-before: [END delete_subdirectory]
+                :language: python
+                :dedent: 12
+                :caption: Delete a subdirectory.
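+
+        A minimal inline sketch (illustrative only; ``directory_client`` is assumed to
+        be an existing ``ShareDirectoryClient``)::
+
+            directory_client.delete_subdirectory("stale-data")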
+ """
+ timeout = kwargs.pop('timeout', None)
+ subdir = self.get_subdirectory_client(directory_name)
+ subdir.delete_directory(timeout=timeout, **kwargs)
+
+ @distributed_trace
+ def upload_file(
+ self, file_name: str,
+ data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+ length: Optional[int] = None,
+ **kwargs
+ ) -> ShareFileClient:
+ """Creates a new file in the directory and returns a ShareFileClient
+ to interact with the file.
+
+ :param str file_name:
+ The name of the file.
+ :param data:
+ Content of the file.
+ :param int length:
+ Length of the file in bytes. Specify its maximum size, up to 1 TiB.
+ :keyword dict(str,str) metadata:
+ Name-value pairs associated with the file as metadata.
+ :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+ ContentSettings object used to set file properties. Used to set content type, encoding,
+ language, disposition, md5, and cache control.
+ :keyword bool validate_content:
+ If true, calculates an MD5 hash for each range of the file. The storage
+ service checks the hash of the content that has arrived with the hash
+ that was sent. This is primarily valuable for detecting bitflips on
+ the wire if using http instead of https as https (the default) will
+ already validate. Note that this MD5 hash is not stored with the
+ file.
+ :keyword int max_concurrency:
+ Maximum number of parallel connections to use.
+ :keyword progress_hook:
+ A callback to track the progress of a long running upload. The signature is
+ function(current: int, total: Optional[int]) where current is the number of bytes transferred
+ so far, and total is the size of the file or None if the size is unknown.
+ :paramtype progress_hook: Callable[[int, Optional[int]], None]
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :keyword str encoding:
+ The encoding used if `data` is supplied as text. Defaults to UTF-8.
+ :returns: ShareFileClient
+ :rtype: ~azure.storage.fileshare.ShareFileClient
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_directory.py
+ :start-after: [START upload_file_to_directory]
+ :end-before: [END upload_file_to_directory]
+ :language: python
+ :dedent: 12
+ :caption: Upload a file to a directory.
+ """
+ file_client = self.get_file_client(file_name)
+ file_client.upload_file(
+ data,
+ length=length,
+ **kwargs)
+ return file_client
+
+ @distributed_trace
+ def delete_file(
+ self, file_name, # type: str
+ **kwargs # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ """Marks the specified file for deletion. The file is later
+ deleted during garbage collection.
+
+ :param str file_name:
+ The name of the file to delete.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :rtype: None
+
+ .. admonition:: Example:
+
+ .. literalinclude:: ../samples/file_samples_directory.py
+ :start-after: [START delete_file_in_directory]
+ :end-before: [END delete_file_in_directory]
+ :language: python
+ :dedent: 12
+ :caption: Delete a file in a directory.
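+
+ .. admonition:: Example (sketch):
+
+ A minimal usage sketch, assuming an existing :class:`ShareDirectoryClient`
+ named ``directory`` (the name is illustrative); ``upload_file`` returns a
+ :class:`ShareFileClient` for the new file::
+
+ directory.upload_file("report.csv", b"a,b,c\n")
+ directory.delete_file("report.csv")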
+ """ + file_client = self.get_file_client(file_name) + file_client.delete_file(**kwargs) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_download.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_download.py new file mode 100644 index 00000000000..080b8a83ef4 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_download.py @@ -0,0 +1,519 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +import threading +import warnings +from io import BytesIO +from typing import Iterator + +from azure.core.exceptions import HttpResponseError, ResourceModifiedError +from azure.core.tracing.common import with_current_context +from ._shared.request_handlers import validate_and_format_range_headers +from ._shared.response_handlers import process_storage_error, parse_length_from_content_range + + +def process_content(data): + if data is None: + raise ValueError("Response cannot be None.") + + try: + return b"".join(list(data)) + except Exception as error: + raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error) + + +class _ChunkDownloader(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + client=None, + total_size=None, + chunk_size=None, + current_progress=None, + start_range=None, + end_range=None, + stream=None, + parallel=None, + validate_content=None, + progress_hook=None, + etag=None, + **kwargs + ): + self.client = client + self.etag = etag + # Information on the download range/chunk size + self.chunk_size = chunk_size + self.total_size = total_size + self.start_index = start_range + self.end_index = end_range + + # The destination that we will write to + self.stream = stream + self.stream_lock = threading.Lock() if parallel else None + self.progress_lock = threading.Lock() if parallel else None + self.progress_hook = progress_hook + + # For a parallel download, the stream is always seekable, so we note down the current position + # in order to seek to the right place when out-of-order chunks come in + self.stream_start = stream.tell() if parallel else None + + # Download progress so far + self.progress_total = current_progress + + # Parameters for each get operation + self.validate_content = validate_content + self.request_options = kwargs + + def _calculate_range(self, chunk_start): + if chunk_start + self.chunk_size > self.end_index: + chunk_end = self.end_index + else: + chunk_end = chunk_start + self.chunk_size + return chunk_start, chunk_end + + def get_chunk_offsets(self): + index = self.start_index + while index < self.end_index: + yield index + index += self.chunk_size + + def process_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + chunk_data = self._download_chunk(chunk_start, chunk_end - 1) + length = chunk_end - chunk_start + if length > 0: + self._write_to_stream(chunk_data, chunk_start) + self._update_progress(length) + + def yield_chunk(self, chunk_start): + chunk_start, chunk_end = self._calculate_range(chunk_start) + return self._download_chunk(chunk_start, chunk_end - 1) + + def _update_progress(self, length): + if self.progress_lock: + with self.progress_lock: # pylint: 
disable=not-context-manager
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ if self.progress_hook:
+ self.progress_hook(self.progress_total, self.total_size)
+
+ def _write_to_stream(self, chunk_data, chunk_start):
+ if self.stream_lock:
+ with self.stream_lock: # pylint: disable=not-context-manager
+ self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+ self.stream.write(chunk_data)
+ else:
+ self.stream.write(chunk_data)
+
+ def _download_chunk(self, chunk_start, chunk_end):
+ range_header, range_validation = validate_and_format_range_headers(
+ chunk_start, chunk_end, check_content_md5=self.validate_content
+ )
+
+ try:
+ _, response = self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ if response.properties.etag != self.etag:
+ raise ResourceModifiedError(message="The file has been modified while downloading.")
+
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ chunk_data = process_content(response)
+ return chunk_data
+
+
+class _ChunkIterator(object):
+ """Iterator over chunks in a file download stream."""
+
+ def __init__(self, size, content, downloader, chunk_size):
+ self.size = size
+ self._chunk_size = chunk_size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopIteration("Download complete")
+ if not self._iter_downloader:
+ # cut the data obtained from the initial GET into chunks
+ if len(self._current_content) > self._chunk_size:
+ return self._get_chunk_data()
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+ # initial GET result still has more than _chunk_size bytes of data
+ if len(self._current_content) >= self._chunk_size:
+ return self._get_chunk_data()
+
+ try:
+ chunk = next(self._iter_chunks)
+ self._current_content += self._iter_downloader.yield_chunk(chunk)
+ except StopIteration as e:
+ self._complete = True
+ if self._current_content:
+ return self._current_content
+ raise e
+
+ return self._get_chunk_data()
+
+ next = __next__ # Python 2 compatibility.
+
+ def _get_chunk_data(self):
+ chunk_data = self._current_content[: self._chunk_size]
+ self._current_content = self._current_content[self._chunk_size:]
+ return chunk_data
+
+
+class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the file being downloaded.
+ :ivar str path:
+ The full path of the file.
+ :ivar str share:
+ The name of the share where the file is.
+ :ivar ~azure.storage.fileshare.FileProperties properties:
+ The properties of the file being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
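+
+ .. admonition:: Example (sketch):
+
+ A minimal usage sketch, assuming an existing :class:`ShareFileClient`
+ named ``file_client`` (the name is illustrative); ``download_file``
+ returns this streaming object::
+
+ stream = file_client.download_file()
+ data = stream.readall() # buffers the whole file in memory
+ # or iterate chunk by chunk without buffering everything:
+ total = sum(len(chunk) for chunk in file_client.download_file().chunks())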
+ """ + + def __init__( + self, + client=None, + config=None, + start_range=None, + end_range=None, + validate_content=None, + max_concurrency=1, + name=None, + path=None, + share=None, + encoding=None, + **kwargs + ): + self.name = name + self.path = path + self.share = share + self.properties = None + self.size = None + + self._client = client + self._config = config + self._start_range = start_range + self._end_range = end_range + self._max_concurrency = max_concurrency + self._encoding = encoding + self._validate_content = validate_content + self._progress_hook = kwargs.pop('progress_hook', None) + self._request_options = kwargs + self._location_mode = None + self._download_complete = False + self._current_content = None + self._file_size = None + self._response = None + self._etag = None + + # The service only provides transactional MD5s for chunks under 4MB. + # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first + # chunk so a transactional MD5 can be retrieved. + self._first_get_size = ( + self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size + ) + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range = (initial_request_start, initial_request_end) + + self._response = self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = "bytes {0}-{1}/{2}".format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = process_content(self._response) + + def __len__(self): + return self.size + + def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content + ) + + try: + location_mode, response = self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options + ) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. 
+ self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._file_size is None: + raise ValueError("Required Content-Range response header is missing or malformed.") + + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = self._client.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options + ) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. + if response.properties.size == self.size: + self._download_complete = True + self._etag = response.properties.etag + return response + + def chunks(self): + # type: () -> Iterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: Iterator[bytes] + """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the end range index unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _ChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + use_location=self._location_mode, + etag=self._etag, + **self._request_options + ) + return _ChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size) + + def readall(self): + # type: () -> bytes + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes + """ + stream = BytesIO() + self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + def content_as_bytes(self, max_concurrency=1): + """DEPRECATED: Download the contents of this file. + + This operation is blocking until all data is downloaded. + + This method is deprecated, use func:`readall` instead. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return self.readall() + + def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """DEPRECATED: Download the contents of this file, and decode as text. + + This operation is blocking until all data is downloaded. 
+
+ This method is deprecated, use :func:`readall` instead.
+
+ :keyword int max_concurrency:
+ The number of parallel connections with which to download.
+ :param str encoding:
+ Text encoding to decode the downloaded bytes. Default is UTF-8.
+ :rtype: str
+ """
+ warnings.warn(
+ "content_as_text is deprecated, use readall instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ return self.readall()
+
+ def readinto(self, stream):
+ """Download the contents of this file to a stream.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The number of bytes read.
+ :rtype: int
+ """
+ # The stream must be seekable if parallel download is required
+ parallel = self._max_concurrency > 1
+ if parallel:
+ error_message = "Target stream handle must be seekable."
+ if sys.version_info >= (3,) and not stream.seekable():
+ raise ValueError(error_message)
+
+ try:
+ stream.seek(stream.tell())
+ except (NotImplementedError, AttributeError):
+ raise ValueError(error_message)
+
+ # Write the content to the user stream
+ stream.write(self._current_content)
+ if self._progress_hook:
+ self._progress_hook(len(self._current_content), self.size)
+
+ if self._download_complete:
+ return self.size
+
+ data_end = self._file_size
+ if self._end_range is not None:
+ # Use the length unless it is over the end of the file
+ data_end = min(self._file_size, self._end_range + 1)
+
+ downloader = _ChunkDownloader(
+ client=self._client,
+ total_size=self.size,
+ chunk_size=self._config.max_chunk_get_size,
+ current_progress=self._first_get_size,
+ start_range=self._initial_range[1] + 1, # Start where the first download ended
+ end_range=data_end,
+ stream=stream,
+ parallel=parallel,
+ validate_content=self._validate_content,
+ use_location=self._location_mode,
+ progress_hook=self._progress_hook,
+ etag=self._etag,
+ **self._request_options
+ )
+ if parallel:
+ import concurrent.futures
+ with concurrent.futures.ThreadPoolExecutor(self._max_concurrency) as executor:
+ list(executor.map(
+ with_current_context(downloader.process_chunk),
+ downloader.get_chunk_offsets()
+ ))
+ else:
+ for chunk in downloader.get_chunk_offsets():
+ downloader.process_chunk(chunk)
+ return self.size
+
+ def download_to_stream(self, stream, max_concurrency=1):
+ """DEPRECATED: Download the contents of this file to a stream.
+
+ This method is deprecated, use :func:`readinto` instead.
+
+ :param stream:
+ The stream to download to. This can be an open file-handle,
+ or any writable stream. The stream must be seekable if the download
+ uses more than one parallel connection.
+ :returns: The properties of the downloaded file.
+ :rtype: Any
+ """
+ warnings.warn(
+ "download_to_stream is deprecated, use readinto instead",
+ DeprecationWarning
+ )
+ self._max_concurrency = max_concurrency
+ self.readinto(stream)
+ return self.properties
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_file_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_file_client.py
new file mode 100644
index 00000000000..877ded13bd6
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_file_client.py
@@ -0,0 +1,1695 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, too-many-public-methods +import functools +import sys +import time +from datetime import datetime +from io import BytesIO +from typing import ( + Any, AnyStr, Dict, IO, Iterable, List, Optional, Tuple, Union, + TYPE_CHECKING +) +from urllib.parse import urlparse, quote, unquote + +from typing_extensions import Self + +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged # pylint: disable=ungrouped-imports +from azure.core.tracing.decorator import distributed_trace +from ._generated import AzureFileStorage +from ._generated.models import FileHTTPHeaders +from ._shared.uploads import IterStreamer, FileChunkUploader, upload_data_chunks +from ._shared.base_client import StorageAccountHostsMixin, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, get_length +from ._shared.response_handlers import return_response_headers, process_storage_error +from ._shared.parser import _str +from ._parser import _get_file_permission, _datetime_to_str +from ._lease import ShareLeaseClient +from ._serialize import ( + get_access_conditions, + get_api_version, + get_dest_access_conditions, + get_rename_smb_properties, + get_smb_properties, + get_source_conditions, + get_source_access_conditions) +from ._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from ._models import HandlesPaged +from ._download import StorageStreamDownloader + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from ._models import ContentSettings, FileProperties, Handle, NTFSAttributes + + +def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + progress_hook=None, + **kwargs): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = client.create_file( + size, + content_settings=content_settings, + metadata=metadata, + timeout=timeout, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + **kwargs + ) + if size == 0: + return response + + responses = upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + progress_hook=progress_hook, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except HttpResponseError as error: + process_storage_error(error) + + +class ShareFileClient(StorageAccountHostsMixin): + """A client to interact with a specific file, although that file may not yet exist. + + For more optional configuration, please click + `here `_. 
+ + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
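+
+ .. admonition:: Example (sketch):
+
+ A minimal construction sketch; the account URL, share name, file path and
+ SAS token below are illustrative placeholders, and the import path for this
+ class depends on how the vendored SDK is exposed::
+
+ file_client = ShareFileClient(
+ account_url="https://myaccount.file.core.windows.net",
+ share_name="myshare",
+ file_path="dir/report.csv",
+ credential="<sas-token>")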
+ """ + def __init__( + self, account_url: str, + share_name: str, + file_path: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not (share_name and file_path): + raise ValueError("Please specify a share name and file name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self.file_path = file_path.split('/') + self.file_name = self.file_path[-1] + self.directory_path = "/".join(self.file_path[:-1]) + + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareFileClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @classmethod + def from_file_url( + cls, file_url: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """A client to interact with a specific file, although that file may not yet exist. + + :param str file_url: The full URI to the file. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A File client. 
+ :rtype: ~azure.storage.fileshare.ShareFileClient + """ + try: + if not file_url.lower().startswith('http'): + file_url = "https://" + file_url + except AttributeError: + raise ValueError("File URL must be a string.") + parsed_url = urlparse(file_url.rstrip('/')) + + if not (parsed_url.netloc and parsed_url.path): + raise ValueError("Invalid URL: {}".format(file_url)) + account_url = parsed_url.netloc.rstrip('/') + "?" + parsed_url.query + + path_share, _, path_file = parsed_url.path.lstrip('/').partition('/') + path_snapshot, _ = parse_query(parsed_url.query) + snapshot = snapshot or path_snapshot + share_name = unquote(path_share) + file_path = '/'.join([unquote(p) for p in path_file.split('/')]) + return cls(account_url, share_name, file_path, snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, str): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + "/".join([quote(p, safe='~') for p in self.file_path]), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str: str, + share_name: str, + file_path: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ShareFileClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str file_path: + The file path. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A File client. + :rtype: ~azure.storage.fileshare.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world.py + :start-after: [START create_file_client] + :end-before: [END create_file_client] + :language: python + :dedent: 12 + :caption: Creates the file client with connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, credential=credential, **kwargs) + + @distributed_trace + def acquire_lease(self, lease_id=None, **kwargs): + # type: (Optional[str], **Any) -> ShareLeaseClient + """Requests a new lease. 
+ + If the file does not have an active lease, the File + Service creates a lease on the blob and returns a new lease. + + :param str lease_id: + Proposed lease ID, in a GUID string format. The File Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START acquire_and_release_lease_on_file] + :end-before: [END acquire_and_release_lease_on_file] + :language: python + :dedent: 12 + :caption: Acquiring a lease on a file. + """ + kwargs['lease_duration'] = -1 + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_file( # type: ignore + self, size, # type: int + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Optional[Union[str, datetime]] + file_last_write_time="now", # type: Optional[Union[str, datetime]] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Creates a new file. + + Note that it only initializes the file with no content. + + :param int size: Specifies the maximum size for the file, + up to 1 TB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. 
+ :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 12 + :caption: Create a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + file_change_time = kwargs.pop('file_change_time', None) + try: + return self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_file( + self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]], + length: Optional[int] = None, + file_attributes: Union[str, "NTFSAttributes"] = "none", + file_creation_time: Optional[Union[str, datetime]] = "now", + file_last_write_time: Optional[Union[str, datetime]] = "now", + file_permission: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs + ) -> Dict[str, Any]: + """Uploads a new file. + + :param data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. 
+ :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword progress_hook: + A callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transferred + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], None] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START upload_file] + :end-before: [END upload_file] + :language: python + :dedent: 12 + :caption: Upload a file. 
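+
+ .. admonition:: Example (sketch):
+
+ A minimal usage sketch with a progress callback, assuming an existing
+ :class:`ShareFileClient` named ``file_client`` (the name is illustrative)::
+
+ def on_progress(current, total):
+ print(current, "of", total, "bytes sent")
+
+ with open("report.csv", "rb") as source:
+ file_client.upload_file(source, max_concurrency=2, progress_hook=on_progress)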
+ """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + progress_hook = kwargs.pop('progress_hook', None) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + + if isinstance(data, str): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, 'read'): + stream = data + elif hasattr(data, '__iter__'): + stream = IterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return _upload_file_helper( + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + progress_hook=progress_hook, + **kwargs) + + @distributed_trace + def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. This could also be a string in ISO 8601 format. 
+ If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.9.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START copy_file_from_url] + :end-before: [END copy_file_from_url] + :language: python + :dedent: 12 + :caption: Copy a file from a URL + """ + metadata = kwargs.pop('metadata', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + kwargs.update(get_smb_properties(kwargs)) + try: + return self._client.file.start_copy( + source_url, + metadata=metadata, + lease_access_conditions=access_conditions, + headers=headers, + cls=return_response_headers, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, FileProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination file with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. This can be either an ID, or an + instance of FileProperties. 
+ :type copy_id: str or ~azure.storage.fileshare.FileProperties + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + copy_id = copy_id.copy.id + except AttributeError: + try: + copy_id = copy_id['copy_id'] + except TypeError: + pass + try: + self._client.file.abort_copy(copy_id=copy_id, + lease_access_conditions=access_conditions, + timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def download_file( + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> StorageStreamDownloader + """Downloads a file to the StorageStreamDownloader. The readall() method must + be used to read all the content or readinto() must be used to download the file into + a stream. Using chunks() returns an iterator which allows the user to iterate over the content in chunks. + + :param int offset: + Start of byte range to use for downloading a section of the file. + Must be set if length is provided. + :param int length: + Number of bytes to read from the stream. This is optional, but + should be supplied for optimal performance. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword bool validate_content: + If true, calculates an MD5 hash for each chunk of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. Also note that if enabled, the memory-efficient upload algorithm + will not be used, because computing the MD5 hash requires buffering + entire blocks, and doing so defeats the purpose of the memory-efficient algorithm. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword progress_hook: + A callback to track the progress of a long running download. The signature is + function(current: int, total: int) where current is the number of bytes transferred + so far, and total is the total size of the download. + :paramtype progress_hook: Callable[[int, int], None] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.fileshare.StorageStreamDownloader + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/file_samples_client.py + :start-after: [START download_file] + :end-before: [END download_file] + :language: python + :dedent: 12 + :caption: Download a file. + """ + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + + range_end = None + if length is not None: + range_end = offset + length - 1 # Service actually uses an end-range inclusive index + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + return StorageStreamDownloader( + client=self._client.file, + config=self._config, + start_range=offset, + end_range=range_end, + name=self.file_name, + path='/'.join(self.file_path), + share=self.share_name, + lease_access_conditions=access_conditions, + cls=deserialize_file_stream, + **kwargs) + + @distributed_trace + def delete_file(self, **kwargs): + # type: (Any) -> None + """Marks the specified file for deletion. The file is + later deleted during garbage collection. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 12 + :caption: Delete a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def rename_file( + self, new_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """ + Rename the source file. + + :param str new_name: + The new file name. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword bool overwrite: + A boolean value for if the destination file already exists, whether this request will + overwrite the file or not. If true, the rename will succeed and will overwrite the + destination file. If not provided or if false and the destination file does exist, the + request will not overwrite the destination file. If provided and the destination file + doesn't exist, the rename will succeed. + :keyword bool ignore_read_only: + A boolean value that specifies whether the ReadOnly attribute on a preexisting destination + file should be respected. If true, the rename will succeed, otherwise, a previous file at the + destination with the ReadOnly attribute set will cause the rename to fail. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the file. This header + can be used if Permission size is <= 8KB, else file_permission_key shall be used. 
+ If SDDL is specified as input, it must have owner, group and dacl. + A value of 'preserve' can be passed to preserve source permissions. + Note: Only one of the file_permission or file_permission_key should be specified. + :keyword str file_permission_key: + Key of the permission to be set for the file. + Note: Only one of the file-permission or file-permission-key should be specified. + :keyword file_attributes: + The file system attributes for the file. + :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str + :keyword file_creation_time: + Creation time for the file. + :paramtype file_creation_time:~datetime.datetime or str + :keyword file_last_write_time: + Last write time for the file. + :paramtype file_last_write_time:~datetime.datetime or str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword str content_type: + The Content Type of the new file. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :keyword Dict[str,str] metadata: + A name-value pair to associate with a file storage object. + :keyword source_lease: + Required if the source file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword destination_lease: + Required if the destination file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :returns: The new File Client. 
+ :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if not new_name: + raise ValueError("Please specify a new file name.") + + new_name = new_name.strip('/') + new_path_and_query = new_name.split('?') + new_file_path = new_path_and_query[0] + if len(new_path_and_query) == 2: + new_file_sas = new_path_and_query[1] or self._query_str.strip('?') + else: + new_file_sas = self._query_str.strip('?') + + new_file_client = ShareFileClient( + '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_file_path, + credential=new_file_sas or self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent + ) + + kwargs.update(get_rename_smb_properties(kwargs)) + + file_http_headers = None + content_type = kwargs.pop('content_type', None) + if content_type: + file_http_headers = FileHTTPHeaders( + file_content_type=content_type + ) + + timeout = kwargs.pop('timeout', None) + overwrite = kwargs.pop('overwrite', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + + source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None)) + dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) + + try: + new_file_client._client.file.rename( # pylint: disable=protected-access + self.url, + timeout=timeout, + replace_if_exists=overwrite, + file_http_headers=file_http_headers, + source_lease_access_conditions=source_access_conditions, + destination_lease_access_conditions=dest_access_conditions, + headers=headers, + **kwargs) + + return new_file_client + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_file_properties(self, **kwargs): + # type: (Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_.
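# --- Illustrative usage sketch (assumption: the vendored fileshare package exposes
# the same public surface as azure.storage.fileshare). The account URL, share name,
# file path and SAS token below are hypothetical placeholders.
from azure.storage.fileshare import ShareFileClient

file_client = ShareFileClient(
    account_url="https://myaccount.file.core.windows.net",  # hypothetical account
    share_name="myshare",
    file_path="mydir/myfile.txt",
    credential="<sas-token>",  # hypothetical credential
)
# rename_file returns a new client pointed at the renamed file
renamed = file_client.rename_file("mydir/renamed.txt", overwrite=True)
props = renamed.get_file_properties()
print(props.name, props.size, props.last_modified)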
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = '/'.join(self.file_path) + return file_props # type: ignore + + @distributed_trace + def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Optional[Union[str, datetime]] + file_last_write_time="preserve", # type: Optional[Union[str, datetime]] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified).
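# Illustrative sketch of set_http_headers: only the fields set on ContentSettings
# are updated, while file times and attributes default to "preserve" as defined
# above. `file_client` is the hypothetical client from the earlier sketch.
from azure.storage.fileshare import ContentSettings

result = file_client.set_http_headers(
    content_settings=ContentSettings(content_type="text/plain", cache_control="no-cache")
)
print(result["etag"], result["last_modified"])  # keys produced by return_response_headers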
+ :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + file_content_length = kwargs.pop('size', None) + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition + ) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + file_change_time = kwargs.pop('file_change_time', None) + try: + return self._client.file.set_http_headers( # type: ignore + file_content_length=file_content_length, + file_http_headers=file_http_headers, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=permission_key, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_file_metadata(self, metadata=None, **kwargs): + # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] + """Sets user-defined metadata for the specified file as one or more + name-value pairs. + + Each call to this operation replaces all existing metadata + attached to the file. To remove all metadata from the file, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.file.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + metadata=metadata, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def upload_range( # type: ignore + self, data, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Upload a range of bytes to a file. + + :param bytes data: + The data to upload. + :param int offset: + Start of byte range to use for uploading a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for uploading a section of the file. + The range can be up to 4 MB in size.
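# Illustrative sketch of set_file_metadata: each call replaces the complete
# metadata set, so calling with no mapping clears it. Names/values are hypothetical.
file_client.set_file_metadata({"category": "reports", "owner": "alice"})
file_client.set_file_metadata()  # replaces the metadata with nothing, i.e. clears it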
+ :keyword bool validate_content: + If true, calculates an MD5 hash of the range content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword file_last_write_mode: + Whether the file last write time should be preserved or overwritten. Possible values + are "preserve" or "now". If not specified, file last write time will be changed to + the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_last_write_mode: Literal["preserve", "now"] + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + file_last_write_mode = kwargs.pop('file_last_write_mode', None) + if isinstance(data, str): + data = data.encode(encoding) + + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + return self._client.file.upload_range( # type: ignore + range=content_range, + content_length=length, + optionalbody=data, + timeout=timeout, + validate_content=validate_content, + file_last_written_mode=file_last_write_mode, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @staticmethod + def _upload_range_from_url_options(source_url, # type: str + offset, # type: int + length, # type: int + source_offset, # type: int + **kwargs # type: Any + ): + # type: (...) 
-> Dict[str, Any] + + if offset is None: + raise ValueError("offset must be provided.") + if length is None: + raise ValueError("length must be provided.") + if source_offset is None: + raise ValueError("source_offset must be provided.") + + # Format range + end_range = offset + length - 1 + destination_range = 'bytes={0}-{1}'.format(offset, end_range) + source_range = 'bytes={0}-{1}'.format(source_offset, source_offset + length - 1) + source_authorization = kwargs.pop('source_authorization', None) + source_mod_conditions = get_source_conditions(kwargs) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + file_last_write_mode = kwargs.pop('file_last_write_mode', None) + + options = { + 'copy_source_authorization': source_authorization, + 'copy_source': source_url, + 'content_length': 0, + 'source_range': source_range, + 'range': destination_range, + 'file_last_written_mode': file_last_write_mode, + 'source_modified_access_conditions': source_mod_conditions, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None), + 'cls': return_response_headers} + options.update(kwargs) + return options + + @distributed_trace + def upload_range_from_url(self, source_url, + offset, + length, + source_offset, + **kwargs + ): + # type: (str, int, int, int, **Any) -> Dict[str, Any] + """ + Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. + + :param int offset: + Start of byte range to use for updating a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for updating a section of the file. + The range can be up to 4 MB in size. + :param str source_url: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. + Examples: + https://myaccount.file.core.windows.net/myshare/mydir/myfile + https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. 
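# Worked example of the inclusive range header computed above: the service takes
# an inclusive end index, so a 4 MiB range starting at offset 0 ends at byte
# 4194303 (offset + length - 1), not 4194304.
offset, length = 0, 4 * 1024 * 1024
assert "bytes={0}-{1}".format(offset, offset + length - 1) == "bytes=0-4194303"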
+ :keyword file_last_write_mode: + Whether the file last write time should be preserved or overwritten. Possible values + are "preserve" or "now". If not specified, file last write time will be changed to + the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_last_write_mode: Literal["preserve", "now"] + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return self._client.file.upload_range_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + def _get_ranges_options( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + previous_sharesnapshot=None, # type: Optional[Union[str, Dict[str, Any]]] + **kwargs + ): + # type: (...) -> Dict[str, Any] + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + content_range = None + if offset is not None: + if length is not None: + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + else: + content_range = 'bytes={0}-'.format(offset) + options = { + 'sharesnapshot': self.snapshot, + 'lease_access_conditions': access_conditions, + 'timeout': kwargs.pop('timeout', None), + 'range': content_range} + if previous_sharesnapshot: + try: + options['prevsharesnapshot'] = previous_sharesnapshot.snapshot # type: ignore + except AttributeError: + try: + options['prevsharesnapshot'] = previous_sharesnapshot['snapshot'] # type: ignore + except TypeError: + options['prevsharesnapshot'] = previous_sharesnapshot + options.update(kwargs) + return options + + @distributed_trace + def get_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid file ranges for a file or snapshot + of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_.
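# Illustrative sketch of upload_range_from_url: a server-side copy of a 1 KiB
# range from a source file URL (hypothetical, carrying its own SAS) into this
# file. Source and destination ranges must be the same length.
source_url = "https://otheraccount.file.core.windows.net/myshare/mydir/source.txt?sastoken"  # hypothetical
file_client.upload_range_from_url(source_url, offset=0, length=1024, source_offset=0)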
+ :returns: + A list of valid ranges. + :rtype: List[dict[str, int]] + """ + options = self._get_ranges_options( + offset=offset, + length=length, + **kwargs) + try: + ranges = self._client.file.get_range_list(**options) + except HttpResponseError as error: + process_storage_error(error) + return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges] + + def get_ranges_diff( # type: ignore + self, + previous_sharesnapshot, # type: Union[str, Dict[str, Any]] + offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """Returns the list of valid file ranges for a file or snapshot + of a file. + + .. versionadded:: 12.6.0 + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :param str previous_sharesnapshot: + The snapshot diff parameter that contains an opaque DateTime value that + specifies a previous file snapshot to be compared + against a more recent snapshot or the current file. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: + A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys. + The first element is the filled file ranges, the second element the cleared file ranges. + :rtype: Tuple[List[Dict[str, int]], List[Dict[str, int]]] + """ + options = self._get_ranges_options( + offset=offset, + length=length, + previous_sharesnapshot=previous_sharesnapshot, + **kwargs) + try: + ranges = self._client.file.get_range_list(**options) + except HttpResponseError as error: + process_storage_error(error) + return get_file_ranges_result(ranges) + + @distributed_trace + def clear_range( # type: ignore + self, offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Clears the specified range and releases the space used in storage for + that range. + + :param int offset: + Start of byte range to use for clearing a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for clearing a section of the file. + The range can be up to 4 MB in size. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified).
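# Illustrative sketch of get_ranges / get_ranges_diff. The snapshot token would
# normally come from ShareClient.create_snapshot(); the value here is hypothetical.
ranges = file_client.get_ranges()  # e.g. [{'start': 0, 'end': 1023}]
filled, cleared = file_client.get_ranges_diff(previous_sharesnapshot="2023-01-01T00:00:00.0000000Z")
for r in filled:
    print("filled:", r["start"], "-", r["end"])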
+ :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns to a 512-byte boundary") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns to a 512-byte boundary") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + try: + return self._client.file.upload_range( # type: ignore + timeout=timeout, + cls=return_response_headers, + content_length=0, + optionalbody=None, + file_range_write="clear", + range=content_range, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def resize_file(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Resizes a file to the specified size. + + :param int size: + Size to resize file to (in bytes) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return self._client.file.set_http_headers( # type: ignore + file_content_length=size, + file_attributes="preserve", + file_creation_time="preserve", + file_last_write_time="preserve", + file_permission="preserve", + lease_access_conditions=access_conditions, + cls=return_response_headers, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_handles(self, **kwargs): + # type: (Any) -> ItemPaged[Handle] + """Lists handles for the file. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An auto-paging iterable of Handle + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.Handle] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.file.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return ItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace + def close_handle(self, handle, **kwargs): + # type: (Union[str, Handle], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles that failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles that failed to close in a dict. + :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/__init__.py new file mode 100644 index 00000000000..0b86b0a3392 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._azure_file_storage import AzureFileStorage + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureFileStorage", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_azure_file_storage.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_azure_file_storage.py new file mode 100644 index 00000000000..cf0f4795dbd --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_azure_file_storage.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Optional, Union + +from azure.core import PipelineClient +from azure.core.rest import HttpRequest, HttpResponse + +from . import models as _models +from ._configuration import AzureFileStorageConfiguration +from ._serialization import Deserializer, Serializer +from .operations import DirectoryOperations, FileOperations, ServiceOperations, ShareOperations + + +class AzureFileStorage: # pylint: disable=client-accepts-api-version-keyword + """AzureFileStorage. + + :ivar service: ServiceOperations operations + :vartype service: azure.storage.fileshare.operations.ServiceOperations + :ivar share: ShareOperations operations + :vartype share: azure.storage.fileshare.operations.ShareOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.fileshare.operations.DirectoryOperations + :ivar file: FileOperations operations + :vartype file: azure.storage.fileshare.operations.FileOperations + :param url: The URL of the service account, share, directory or file that is the target of the + desired operation. Required. + :type url: str + :param base_url: Service URL. Required. Default value is "". + :type base_url: str + :param file_request_intent: Valid value is backup. "backup" Default value is None. + :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent + :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + Default value is None. + :type allow_trailing_dot: bool + :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source + URI. Default value is None. + :type allow_source_trailing_dot: bool + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2022-11-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes + downloaded from the source url into the specified range. Default value is "update". Note that + overriding this default value may result in unsupported behavior. 
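# Illustrative sketch of constructing the generated client directly (normally the
# hand-written ShareFileClient wraps it and drives the operation groups via
# self._client.file etc.). The URL is a hypothetical placeholder.
client = AzureFileStorage(
    url="https://myaccount.file.core.windows.net/myshare/mydir/myfile.txt",
    allow_trailing_dot=False,
)
with client:
    print(client.service, client.share, client.directory, client.file)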
+ :paramtype file_range_write_from_url: str + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, + url: str, + base_url: str = "", + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + **kwargs: Any + ) -> None: + self._config = AzureFileStorageConfiguration( + url=url, + file_request_intent=file_request_intent, + allow_trailing_dot=allow_trailing_dot, + allow_source_trailing_dot=allow_source_trailing_dot, + **kwargs + ) + self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations(self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations(self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations(self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> "AzureFileStorage": + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_configuration.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_configuration.py new file mode 100644 index 00000000000..d39d9778a00 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_configuration.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import sys +from typing import Any, Optional, Union + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from . 
import models as _models + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports + +VERSION = "unknown" + + +class AzureFileStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for AzureFileStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, share, directory or file that is the target of the + desired operation. Required. + :type url: str + :param file_request_intent: Valid value is backup. "backup" Default value is None. + :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent + :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + Default value is None. + :type allow_trailing_dot: bool + :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source + URI. Default value is None. + :type allow_source_trailing_dot: bool + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2022-11-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes + downloaded from the source url into the specified range. Default value is "update". Note that + overriding this default value may result in unsupported behavior. + :paramtype file_range_write_from_url: str + """ + + def __init__( + self, + url: str, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + **kwargs: Any + ) -> None: + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + version: Literal["2022-11-02"] = kwargs.pop("version", "2022-11-02") + file_range_write_from_url: Literal["update"] = kwargs.pop("file_range_write_from_url", "update") + + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + self.url = url + self.file_request_intent = file_request_intent + self.allow_trailing_dot = allow_trailing_dot + self.allow_source_trailing_dot = allow_source_trailing_dot + self.version = version + self.file_range_write_from_url = file_range_write_from_url + kwargs.setdefault("sdk_moniker", "azurefilestorage/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_patch.py 
b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_patch.py new file mode 100644 index 00000000000..f99e77fef98 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_patch.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. +# +# -------------------------------------------------------------------------- + +# This file is used for handwritten extensions to the generated code. Example: +# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md +def patch_sdk(): + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_serialization.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_serialization.py new file mode 100644 index 00000000000..f17c068e833 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_serialization.py @@ -0,0 +1,1996 @@ +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# -------------------------------------------------------------------------- + +# pylint: skip-file +# pyright: reportUnnecessaryTypeIgnoreComment=false + +from base64 import b64decode, b64encode +import calendar +import datetime +import decimal +import email +from enum import Enum +import json +import logging +import re +import sys +import codecs +from typing import ( + Dict, + Any, + cast, + Optional, + Union, + AnyStr, + IO, + Mapping, + Callable, + TypeVar, + MutableMapping, + Type, + List, +) + +try: + from urllib import quote # type: ignore +except ImportError: + from urllib.parse import quote +import xml.etree.ElementTree as ET + +import isodate # type: ignore + +from azure.core.exceptions import DeserializationError, SerializationError, raise_with_traceback +from azure.core.serialization import NULL as AzureCoreNull + +_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") + +ModelType = TypeVar("ModelType", bound="Model") +JSON = MutableMapping[str, Any] + + +class RawDeserializer: + + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") + + # Name used in context + CONTEXT_NAME = "deserialized_data" + + @classmethod + def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: + """Decode data according to content-type. + + Accept a stream of data as well, but it will be loaded at once in memory for now. + + If no content-type, will return the string version (not bytes, not stream) + + :param data: Input, could be bytes or stream (will be decoded with UTF8) or text + :type data: str or bytes or IO + :param str content_type: The content type. + """ + if hasattr(data, "read"): + # Assume a stream + data = cast(IO, data).read() + + if isinstance(data, bytes): + data_as_str = data.decode(encoding="utf-8-sig") + else: + # Explain to mypy the correct type. + data_as_str = cast(str, data) + + # Remove Byte Order Mark if present in string + data_as_str = data_as_str.lstrip(_BOM) + + if content_type is None: + return data + + if cls.JSON_REGEXP.match(content_type): + try: + return json.loads(data_as_str) + except ValueError as err: + raise DeserializationError("JSON is invalid: {}".format(err), err) + elif "xml" in (content_type or []): + try: + + try: + if isinstance(data, unicode): # type: ignore + # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string + data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore + except NameError: + pass + + return ET.fromstring(data_as_str) # nosec + except ET.ParseError: + # It might be because the server has an issue, and returned JSON with + # content-type XML.... + # So let's try a JSON load, and if it's still broken + # let's flow the initial exception + def _json_attempt(data): + try: + return True, json.loads(data) + except ValueError: + return False, None # Don't care about this one + + success, json_result = _json_attempt(data) + if success: + return json_result + # If I'm here, it's not JSON, it's not XML, let's scream + # and raise the last context in this block (the XML exception) + # The function hack is because Py2.7 messes up with exception + # context otherwise. 
+ _LOGGER.critical("Wasn't XML nor JSON, failing") + raise_with_traceback(DeserializationError, "XML is invalid") + raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) + + @classmethod + def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: + """Deserialize from HTTP response. + + Use bytes and headers to NOT use any requests/aiohttp or whatever + specific implementation. + Headers will be tested for "content-type" + """ + # Try to use content-type from headers if available + content_type = None + if "content-type" in headers: + content_type = headers["content-type"].split(";")[0].strip().lower() + # Ouch, this server did not declare what it sent... + # Let's guess it's JSON... + # Also, since Autorest was considering that an empty body was a valid JSON, + # need that test as well.... + else: + content_type = "application/json" + + if body_bytes: + return cls.deserialize_from_text(body_bytes, content_type) + return None + + +try: + basestring # type: ignore + unicode_str = unicode # type: ignore +except NameError: + basestring = str + unicode_str = str + +_LOGGER = logging.getLogger(__name__) + +try: + _long_type = long # type: ignore +except NameError: + _long_type = int + + +class UTC(datetime.tzinfo): + """Time Zone info for handling UTC""" + + def utcoffset(self, dt): + """UTC offset for UTC is 0.""" + return datetime.timedelta(0) + + def tzname(self, dt): + """Timestamp representation.""" + return "Z" + + def dst(self, dt): + """No daylight saving for UTC.""" + return datetime.timedelta(hours=1) + + +try: + from datetime import timezone as _FixedOffset # type: ignore +except ImportError: # Python 2.7 + + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ + + def __init__(self, offset): + self.__offset = offset + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return "<FixedOffset {}>".format(self.tzname(None)) if False else str(self.__offset.total_seconds() / 3600) + + def __repr__(self): + return "<FixedOffset {}>".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) + + +try: + from datetime import timezone + + TZ_UTC = timezone.utc +except ImportError: + TZ_UTC = UTC() # type: ignore + +_FLATTEN = re.compile(r"(?
None: + self.additional_properties: Dict[str, Any] = {} + for k in kwargs: + if k not in self._attribute_map: + _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) + elif k in self._validation and self._validation[k].get("readonly", False): + _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) + else: + setattr(self, k, kwargs[k]) + + def __eq__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other: Any) -> bool: + """Compare objects by comparing all attributes.""" + return not self.__eq__(other) + + def __str__(self) -> str: + return str(self.__dict__) + + @classmethod + def enable_additional_properties_sending(cls) -> None: + cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} + + @classmethod + def is_xml_model(cls) -> bool: + try: + cls._xml_map # type: ignore + except AttributeError: + return False + return True + + @classmethod + def _create_xml_node(cls): + """Create XML node.""" + try: + xml_map = cls._xml_map # type: ignore + except AttributeError: + xml_map = {} + + return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) + + def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: + """Return the JSON that would be sent to azure from this model. + + This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param bool keep_readonly: If you want to serialize the readonly attributes + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, keep_readonly=keep_readonly, **kwargs) + + def as_dict( + self, + keep_readonly: bool = True, + key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, + **kwargs: Any + ) -> JSON: + """Return a dict that can be serialized using json.dump. + + Advanced usage might optionally use a callback as parameter: + + .. code::python + + def my_key_transformer(key, attr_desc, value): + return key + + Key is the attribute name used in Python. Attr_desc + is a dict of metadata. Currently contains 'type' with the + msrest type and 'key' with the RestAPI encoded key. + Value is the current value in this object. + + The string returned will be used to serialize the key. + If the return type is a list, this is considered hierarchical + result dict. + + See the three examples in this file: + + - attribute_transformer + - full_restapi_key_transformer + - last_restapi_key_transformer + + If you want XML serialization, you can pass the kwargs is_xml=True. + + :param function key_transformer: A key transformer function. + :returns: A dict JSON compatible object + :rtype: dict + """ + serializer = Serializer(self._infer_class_models()) + return serializer._serialize(self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs) + + @classmethod + def _infer_class_models(cls): + try: + str_models = cls.__module__.rsplit(".", 1)[0] + models = sys.modules[str_models] + client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} + if cls.__name__ not in client_models: + raise ValueError("Not Autorest generated code") + except Exception: + # Assume it's not Autorest generated (tests?). 
Add ourselves as dependencies. + client_models = {cls.__name__: cls} + return client_models + + @classmethod + def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: + """Parse a str using the RestAPI syntax and return a model. + + :param str data: A str using RestAPI structure. JSON by default. + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + return deserializer(cls.__name__, data, content_type=content_type) + + @classmethod + def from_dict( + cls: Type[ModelType], + data: Any, + key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, + content_type: Optional[str] = None, + ) -> ModelType: + """Parse a dict using given key extractor return a model. + + By default consider key + extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor + and last_rest_key_case_insensitive_extractor) + + :param dict data: A dict using RestAPI structure + :param str content_type: JSON by default, set application/xml if XML. + :returns: An instance of this model + :raises: DeserializationError if something went wrong + """ + deserializer = Deserializer(cls._infer_class_models()) + deserializer.key_extractors = ( # type: ignore + [ # type: ignore + attribute_key_case_insensitive_extractor, + rest_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + if key_extractors is None + else key_extractors + ) + return deserializer(cls.__name__, data, content_type=content_type) + + @classmethod + def _flatten_subtype(cls, key, objects): + if "_subtype_map" not in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + + @classmethod + def _classify(cls, response, objects): + """Check the class _subtype_map for any child classes. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. + """ + for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): + subtype_value = None + + if not isinstance(response, ET.Element): + rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + else: + subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) + if subtype_value: + # Try to match base class. Can be class name only + # (bug to fix in Autorest to support x-ms-discriminator-name) + if cls.__name__ == subtype_value: + return cls + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + try: + return objects[flatten_mapping_type[subtype_value]] # type: ignore + except KeyError: + _LOGGER.warning( + "Subtype value %s has no mapping, use base class %s.", + subtype_value, + cls.__name__, + ) + break + else: + _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) + break + return cls + + @classmethod + def _get_rest_key_parts(cls, attr_key): + """Get the RestAPI key of this attr, split it and decode part + :param str attr_key: Attribute key must be in attribute_map. 
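# Illustrative sketch of the Model helpers above, using a minimal hypothetical
# subclass: from_dict() parses a RestAPI-shaped dict and serialize() re-emits it.
class Thing(Model):
    _attribute_map = {"name": {"key": "name", "type": "str"}}

    def __init__(self, **kwargs):
        super(Thing, self).__init__(**kwargs)
        self.name = kwargs.get("name", None)

thing = Thing.from_dict({"name": "example"})
assert thing.name == "example"
assert thing.serialize() == {"name": "example"}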
+        :returns: A list of RestAPI parts
+        :rtype: list
+        """
+        rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"])
+        return [_decode_attribute_map_key(key_part) for key_part in rest_split_key]
+
+
+def _decode_attribute_map_key(key):
+    """This decodes a key in an _attribute_map to the actual key we want to look at
+    inside the received data.
+
+    :param str key: A key string from the generated code
+    """
+    return key.replace("\\.", ".")
+
+
+class Serializer(object):
+    """Request object model serializer."""
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()}
+    days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
+    months = {
+        1: "Jan",
+        2: "Feb",
+        3: "Mar",
+        4: "Apr",
+        5: "May",
+        6: "Jun",
+        7: "Jul",
+        8: "Aug",
+        9: "Sep",
+        10: "Oct",
+        11: "Nov",
+        12: "Dec",
+    }
+    validation = {
+        "min_length": lambda x, y: len(x) < y,
+        "max_length": lambda x, y: len(x) > y,
+        "minimum": lambda x, y: x < y,
+        "maximum": lambda x, y: x > y,
+        "minimum_ex": lambda x, y: x <= y,
+        "maximum_ex": lambda x, y: x >= y,
+        "min_items": lambda x, y: len(x) < y,
+        "max_items": lambda x, y: len(x) > y,
+        "pattern": lambda x, y: not re.match(y, x, re.UNICODE),
+        "unique": lambda x, y: len(x) != len(set(x)),
+        "multiple": lambda x, y: x % y != 0,
+    }
+
+    def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
+        self.serialize_type = {
+            "iso-8601": Serializer.serialize_iso,
+            "rfc-1123": Serializer.serialize_rfc,
+            "unix-time": Serializer.serialize_unix,
+            "duration": Serializer.serialize_duration,
+            "date": Serializer.serialize_date,
+            "time": Serializer.serialize_time,
+            "decimal": Serializer.serialize_decimal,
+            "long": Serializer.serialize_long,
+            "bytearray": Serializer.serialize_bytearray,
+            "base64": Serializer.serialize_base64,
+            "object": self.serialize_object,
+            "[]": self.serialize_iter,
+            "{}": self.serialize_dict,
+        }
+        self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {}
+        self.key_transformer = full_restapi_key_transformer
+        self.client_side_validation = True
+
+    def _serialize(self, target_obj, data_type=None, **kwargs):
+        """Serialize data into a string according to type.
+
+        :param target_obj: The data to be serialized.
+        :param str data_type: The type to be serialized from.
+        :rtype: str, dict
+        :raises: SerializationError if serialization fails.
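+
+        A minimal sketch of typical usage ("Cat" is a hypothetical model, not
+        part of this SDK):
+
+        .. code:: python
+
+            serializer = Serializer({"Cat": Cat})
+            body = serializer._serialize(Cat(name="Felix"), "Cat")
+            # -> {"name": "Felix"}, assuming Cat maps attribute "name" to key "name"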
+ """ + key_transformer = kwargs.get("key_transformer", self.key_transformer) + keep_readonly = kwargs.get("keep_readonly", False) + if target_obj is None: + return None + + attr_name = None + class_name = target_obj.__class__.__name__ + + if data_type: + return self.serialize_data(target_obj, data_type, **kwargs) + + if not hasattr(target_obj, "_attribute_map"): + data_type = type(target_obj).__name__ + if data_type in self.basic_types.values(): + return self.serialize_data(target_obj, data_type, **kwargs) + + # Force "is_xml" kwargs if we detect a XML model + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) + + serialized = {} + if is_xml_model_serialization: + serialized = target_obj._create_xml_node() + try: + attributes = target_obj._attribute_map + for attr, attr_desc in attributes.items(): + attr_name = attr + if not keep_readonly and target_obj._validation.get(attr_name, {}).get("readonly", False): + continue + + if attr_name == "additional_properties" and attr_desc["key"] == "": + if target_obj.additional_properties is not None: + serialized.update(target_obj.additional_properties) + continue + try: + + orig_attr = getattr(target_obj, attr) + if is_xml_model_serialization: + pass # Don't provide "transformer" for XML for now. Keep "orig_attr" + else: # JSON + keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) + keys = keys if isinstance(keys, list) else [keys] + + kwargs["serialization_ctxt"] = attr_desc + new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) + + if is_xml_model_serialization: + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + xml_prefix = xml_desc.get("prefix", None) + xml_ns = xml_desc.get("ns", None) + if xml_desc.get("attr", False): + if xml_ns: + ET.register_namespace(xml_prefix, xml_ns) + xml_name = "{}{}".format(xml_ns, xml_name) + serialized.set(xml_name, new_attr) # type: ignore + continue + if xml_desc.get("text", False): + serialized.text = new_attr # type: ignore + continue + if isinstance(new_attr, list): + serialized.extend(new_attr) # type: ignore + elif isinstance(new_attr, ET.Element): + # If the down XML has no XML/Name, we MUST replace the tag with the local tag. But keeping the namespaces. + if "name" not in getattr(orig_attr, "_xml_map", {}): + splitted_tag = new_attr.tag.split("}") + if len(splitted_tag) == 2: # Namespace + new_attr.tag = "}".join([splitted_tag[0], xml_name]) + else: + new_attr.tag = xml_name + serialized.append(new_attr) # type: ignore + else: # That's a basic type + # Integrate namespace if necessary + local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) + local_node.text = unicode_str(new_attr) + serialized.append(local_node) # type: ignore + else: # JSON + for k in reversed(keys): # type: ignore + new_attr = {k: new_attr} + + _new_attr = new_attr + _serialized = serialized + for k in keys: # type: ignore + if k not in _serialized: + _serialized.update(_new_attr) # type: ignore + _new_attr = _new_attr[k] # type: ignore + _serialized = _serialized[k] + except ValueError: + continue + + except (AttributeError, KeyError, TypeError) as err: + msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) + raise_with_traceback(SerializationError, msg, err) + else: + return serialized + + def body(self, data, data_type, **kwargs): + """Serialize data intended for a request body. 
+ + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: dict + :raises: SerializationError if serialization fails. + :raises: ValueError if data is None + """ + + # Just in case this is a dict + internal_data_type_str = data_type.strip("[]{}") + internal_data_type = self.dependencies.get(internal_data_type_str, None) + try: + is_xml_model_serialization = kwargs["is_xml"] + except KeyError: + if internal_data_type and issubclass(internal_data_type, Model): + is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) + else: + is_xml_model_serialization = False + if internal_data_type and not isinstance(internal_data_type, Enum): + try: + deserializer = Deserializer(self.dependencies) + # Since it's on serialization, it's almost sure that format is not JSON REST + # We're not able to deal with additional properties for now. + deserializer.additional_properties_detection = False + if is_xml_model_serialization: + deserializer.key_extractors = [ # type: ignore + attribute_key_case_insensitive_extractor, + ] + else: + deserializer.key_extractors = [ + rest_key_case_insensitive_extractor, + attribute_key_case_insensitive_extractor, + last_rest_key_case_insensitive_extractor, + ] + data = deserializer._deserialize(data_type, data) + except DeserializationError as err: + raise_with_traceback(SerializationError, "Unable to build a model: " + str(err), err) + + return self._serialize(data, data_type, **kwargs) + + def url(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL path. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return output + + def query(self, name, data, data_type, **kwargs): + """Serialize data intended for a URL query. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. + :raises: ValueError if data is None + """ + try: + # Treat the list aside, since we don't want to encode the div separator + if data_type.startswith("["): + internal_data_type = data_type[1:-1] + data = [self.serialize_data(d, internal_data_type, **kwargs) if d is not None else "" for d in data] + if not kwargs.get("skip_quote", False): + data = [quote(str(d), safe="") for d in data] + return str(self.serialize_iter(data, internal_data_type, **kwargs)) + + # Not a list, regular serialization + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + if kwargs.get("skip_quote") is True: + output = str(output) + else: + output = quote(str(output), safe="") + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def header(self, name, data, data_type, **kwargs): + """Serialize data intended for a request header. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :rtype: str + :raises: TypeError if serialization fails. 
+ :raises: ValueError if data is None + """ + try: + if data_type in ["[str]"]: + data = ["" if d is None else d for d in data] + + output = self.serialize_data(data, data_type, **kwargs) + if data_type == "bool": + output = json.dumps(output) + except SerializationError: + raise TypeError("{} must be type {}.".format(name, data_type)) + else: + return str(output) + + def serialize_data(self, data, data_type, **kwargs): + """Serialize generic data according to supplied data type. + + :param data: The data to be serialized. + :param str data_type: The type to be serialized from. + :param bool required: Whether it's essential that the data not be + empty or None + :raises: AttributeError if required data is None. + :raises: ValueError if data is None + :raises: SerializationError if serialization fails. + """ + if data is None: + raise ValueError("No value for given attribute") + + try: + if data is AzureCoreNull: + return None + if data_type in self.basic_types.values(): + return self.serialize_basic(data, data_type, **kwargs) + + elif data_type in self.serialize_type: + return self.serialize_type[data_type](data, **kwargs) + + # If dependencies is empty, try with current data class + # It has to be a subclass of Enum anyway + enum_type = self.dependencies.get(data_type, data.__class__) + if issubclass(enum_type, Enum): + return Serializer.serialize_enum(data, enum_obj=enum_type) + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.serialize_type: + return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) + + except (ValueError, TypeError) as err: + msg = "Unable to serialize value: {!r} as type: {!r}." + raise_with_traceback(SerializationError, msg.format(data, data_type), err) + else: + return self._serialize(data, **kwargs) + + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): + custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) + if custom_serializer: + return custom_serializer + if kwargs.get("is_xml", False): + return cls._xml_basic_types_serializers.get(data_type) + + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): + """Serialize basic builting data type. + Serializes objects to str, int, float or bool. + + Possible kwargs: + - basic_types_serializers dict[str, callable] : If set, use the callable as serializer + - is_xml bool : If set, use xml_basic_types_serializers + + :param data: Object to be serialized. + :param str data_type: Type of object in the iterable. + """ + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) + if custom_serializer: + return custom_serializer(data) + if data_type == "str": + return cls.serialize_unicode(data) + return eval(data_type)(data) # nosec + + @classmethod + def serialize_unicode(cls, data): + """Special handling for serializing unicode strings in Py2. + Encode to UTF-8 if unicode, otherwise handle as a str. + + :param data: Object to be serialized. + :rtype: str + """ + try: # If I received an enum, return its value + return data.value + except AttributeError: + pass + + try: + if isinstance(data, unicode): # type: ignore + # Don't change it, JSON and XML ElementTree are totally able + # to serialize correctly u'' strings + return data + except NameError: + return str(data) + else: + return str(data) + + def serialize_iter(self, data, iter_type, div=None, **kwargs): + """Serialize iterable. + + Supported kwargs: + - serialization_ctxt dict : The current entry of _attribute_map, or same format. 
+ serialization_ctxt['type'] should be same as data_type. + - is_xml bool : If set, serialize as XML + + :param list attr: Object to be serialized. + :param str iter_type: Type of object in the iterable. + :param bool required: Whether the objects in the iterable must + not be None or empty. + :param str div: If set, this str will be used to combine the elements + in the iterable into a combined string. Default is 'None'. + :rtype: list, str + """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") + + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + is_xml = kwargs.get("is_xml", False) + + serialized = [] + for d in data: + try: + serialized.append(self.serialize_data(d, iter_type, **kwargs)) + except ValueError: + serialized.append(None) + + if div: + serialized = ["" if s is None else str(s) for s in serialized] + serialized = div.join(serialized) + + if "xml" in serialization_ctxt or is_xml: + # XML serialization is more complicated + xml_desc = serialization_ctxt.get("xml", {}) + xml_name = xml_desc.get("name") + if not xml_name: + xml_name = serialization_ctxt["key"] + + # Create a wrap node if necessary (use the fact that Element and list have "append") + is_wrapped = xml_desc.get("wrapped", False) + node_name = xml_desc.get("itemsName", xml_name) + if is_wrapped: + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + else: + final_result = [] + # All list elements to "local_node" + for el in serialized: + if isinstance(el, ET.Element): + el_node = el + else: + el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + if el is not None: # Otherwise it writes "None" :-p + el_node.text = str(el) + final_result.append(el_node) + return final_result + return serialized + + def serialize_dict(self, attr, dict_type, **kwargs): + """Serialize a dictionary of objects. + + :param dict attr: Object to be serialized. + :param str dict_type: Type of object in the dictionary. + :param bool required: Whether the objects in the dictionary must + not be None or empty. + :rtype: dict + """ + serialization_ctxt = kwargs.get("serialization_ctxt", {}) + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + + if "xml" in serialization_ctxt: + # XML serialization is more complicated + xml_desc = serialization_ctxt["xml"] + xml_name = xml_desc["name"] + + final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) + for key, value in serialized.items(): + ET.SubElement(final_result, key).text = value + return final_result + + return serialized + + def serialize_object(self, attr, **kwargs): + """Serialize a generic object. + This will be handled as a dictionary. If object passed in is not + a basic type (str, int, float, dict, list) it will simply be + cast to str. + + :param dict attr: Object to be serialized. 
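+
+        For example (a hedged sketch): serialize_object({"a": [1, "x", None]})
+        returns {"a": [1, "x", None]}, while an unknown type falls back to
+        str(attr).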
+ :rtype: dict or str + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + return attr + obj_type = type(attr) + if obj_type in self.basic_types: + return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) + if obj_type is _long_type: + return self.serialize_long(attr) + if obj_type is unicode_str: + return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) + + # If it's a model or I know this dependency, serialize as a Model + elif obj_type in self.dependencies.values() or isinstance(attr, Model): + return self._serialize(attr) + + if obj_type == dict: + serialized = {} + for key, value in attr.items(): + try: + serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) + except ValueError: + serialized[self.serialize_unicode(key)] = None + return serialized + + if obj_type == list: + serialized = [] + for obj in attr: + try: + serialized.append(self.serialize_object(obj, **kwargs)) + except ValueError: + pass + return serialized + return str(attr) + + @staticmethod + def serialize_enum(attr, enum_obj=None): + try: + result = attr.value + except AttributeError: + result = attr + try: + enum_obj(result) # type: ignore + return result + except ValueError: + for enum_value in enum_obj: # type: ignore + if enum_value.value.lower() == str(attr).lower(): + return enum_value.value + error = "{!r} is not valid value for enum {!r}" + raise SerializationError(error.format(attr, enum_obj)) + + @staticmethod + def serialize_bytearray(attr, **kwargs): + """Serialize bytearray into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + return b64encode(attr).decode() + + @staticmethod + def serialize_base64(attr, **kwargs): + """Serialize str into base-64 string. + + :param attr: Object to be serialized. + :rtype: str + """ + encoded = b64encode(attr).decode("ascii") + return encoded.strip("=").replace("+", "-").replace("/", "_") + + @staticmethod + def serialize_decimal(attr, **kwargs): + """Serialize Decimal object to float. + + :param attr: Object to be serialized. + :rtype: float + """ + return float(attr) + + @staticmethod + def serialize_long(attr, **kwargs): + """Serialize long (Py2) or int (Py3). + + :param attr: Object to be serialized. + :rtype: int/long + """ + return _long_type(attr) + + @staticmethod + def serialize_date(attr, **kwargs): + """Serialize Date object into ISO-8601 formatted string. + + :param Date attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_date(attr) + t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) + return t + + @staticmethod + def serialize_time(attr, **kwargs): + """Serialize Time object into ISO-8601 formatted string. + + :param datetime.time attr: Object to be serialized. + :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_time(attr) + t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) + if attr.microsecond: + t += ".{:02}".format(attr.microsecond) + return t + + @staticmethod + def serialize_duration(attr, **kwargs): + """Serialize TimeDelta object into ISO-8601 formatted string. + + :param TimeDelta attr: Object to be serialized. 
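+
+        For example, datetime.timedelta(days=1, hours=2) serializes to
+        "P1DT2H".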
+ :rtype: str + """ + if isinstance(attr, str): + attr = isodate.parse_duration(attr) + return isodate.duration_isoformat(attr) + + @staticmethod + def serialize_rfc(attr, **kwargs): + """Serialize Datetime object into RFC-1123 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: TypeError if format invalid. + """ + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + except AttributeError: + raise TypeError("RFC1123 object must be valid Datetime object.") + + return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( + Serializer.days[utc.tm_wday], + utc.tm_mday, + Serializer.months[utc.tm_mon], + utc.tm_year, + utc.tm_hour, + utc.tm_min, + utc.tm_sec, + ) + + @staticmethod + def serialize_iso(attr, **kwargs): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: SerializationError if format invalid. + """ + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") + if microseconds: + microseconds = "." + microseconds + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec + ) + return date + microseconds + "Z" + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(SerializationError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + @staticmethod + def serialize_unix(attr, **kwargs): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param Datetime attr: Object to be serialized. + :rtype: int + :raises: SerializationError if format invalid + """ + if isinstance(attr, int): + return attr + try: + if not attr.tzinfo: + _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") + return int(calendar.timegm(attr.utctimetuple())) + except AttributeError: + raise TypeError("Unix time object must be valid Datetime object.") + + +def rest_key_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." in key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + dict_keys = cast(List[str], _FLATTEN.split(key)) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + return working_data.get(key) + + +def rest_key_case_insensitive_extractor(attr, attr_desc, data): + key = attr_desc["key"] + working_data = data + + while "." 
in key: + dict_keys = _FLATTEN.split(key) + if len(dict_keys) == 1: + key = _decode_attribute_map_key(dict_keys[0]) + break + working_key = _decode_attribute_map_key(dict_keys[0]) + working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None + key = ".".join(dict_keys[1:]) + + if working_data: + return attribute_key_case_insensitive_extractor(key, None, working_data) + + +def last_rest_key_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key.""" + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_extractor(dict_keys[-1], None, data) + + +def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): + """Extract the attribute in "data" based on the last part of the JSON path key. + + This is the case insensitive version of "last_rest_key_extractor" + """ + key = attr_desc["key"] + dict_keys = _FLATTEN.split(key) + return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) + + +def attribute_key_extractor(attr, _, data): + return data.get(attr) + + +def attribute_key_case_insensitive_extractor(attr, _, data): + found_key = None + lower_attr = attr.lower() + for key in data: + if lower_attr == key.lower(): + found_key = key + break + + return data.get(found_key) + + +def _extract_name_from_internal_type(internal_type): + """Given an internal type XML description, extract correct XML name with namespace. + + :param dict internal_type: An model type + :rtype: tuple + :returns: A tuple XML name + namespace dict + """ + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + xml_name = internal_type_xml_map.get("name", internal_type.__name__) + xml_ns = internal_type_xml_map.get("ns", None) + if xml_ns: + xml_name = "{}{}".format(xml_ns, xml_name) + return xml_name + + +def xml_key_extractor(attr, attr_desc, data): + if isinstance(data, dict): + return None + + # Test if this model is XML ready first + if not isinstance(data, ET.Element): + return None + + xml_desc = attr_desc.get("xml", {}) + xml_name = xml_desc.get("name", attr_desc["key"]) + + # Look for a children + is_iter_type = attr_desc["type"].startswith("[") + is_wrapped = xml_desc.get("wrapped", False) + internal_type = attr_desc.get("internalType", None) + internal_type_xml_map = getattr(internal_type, "_xml_map", {}) + + # Integrate namespace if necessary + xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) + if xml_ns: + xml_name = "{}{}".format(xml_ns, xml_name) + + # If it's an attribute, that's simple + if xml_desc.get("attr", False): + return data.get(xml_name) + + # If it's x-ms-text, that's simple too + if xml_desc.get("text", False): + return data.text + + # Scenario where I take the local name: + # - Wrapped node + # - Internal type is an enum (considered basic types) + # - Internal type has no XML/Name node + if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): + children = data.findall(xml_name) + # If internal type has a local name and it's not a list, I use that name + elif not is_iter_type and internal_type and "name" in internal_type_xml_map: + xml_name = _extract_name_from_internal_type(internal_type) + children = data.findall(xml_name) + # That's an array + else: + if 
internal_type:  # Complex type, ignore itemsName and use the complex type name
+            items_name = _extract_name_from_internal_type(internal_type)
+        else:
+            items_name = xml_desc.get("itemsName", xml_name)
+        children = data.findall(items_name)
+
+    if len(children) == 0:
+        if is_iter_type:
+            if is_wrapped:
+                return None  # is_wrapped no node, we want None
+            else:
+                return []  # not wrapped, assume empty list
+        return None  # Assume it's not there, maybe an optional node.
+
+    # If is_iter_type and not wrapped, return all found children
+    if is_iter_type:
+        if not is_wrapped:
+            return children
+        else:  # Iter and wrapped, should have found one node only (the wrap one)
+            if len(children) != 1:
+                raise DeserializationError(
+                    "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format(
+                        xml_name
+                    )
+                )
+            return list(children[0])  # Might be empty list and that's ok.
+
+    # Here it's not an itertype, we should have found one element only or empty
+    if len(children) > 1:
+        raise DeserializationError("Found several XML '{}' where it was not expected".format(xml_name))
+    return children[0]
+
+
+class Deserializer(object):
+    """Response object model deserializer.
+
+    :param dict classes: Class type dictionary for deserializing complex types.
+    :ivar list key_extractors: Ordered list of extractors to be used by this deserializer.
+    """
+
+    basic_types = {str: "str", int: "int", bool: "bool", float: "float"}
+
+    valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")
+
+    def __init__(self, classes: Optional[Mapping[str, Type[ModelType]]] = None):
+        self.deserialize_type = {
+            "iso-8601": Deserializer.deserialize_iso,
+            "rfc-1123": Deserializer.deserialize_rfc,
+            "unix-time": Deserializer.deserialize_unix,
+            "duration": Deserializer.deserialize_duration,
+            "date": Deserializer.deserialize_date,
+            "time": Deserializer.deserialize_time,
+            "decimal": Deserializer.deserialize_decimal,
+            "long": Deserializer.deserialize_long,
+            "bytearray": Deserializer.deserialize_bytearray,
+            "base64": Deserializer.deserialize_base64,
+            "object": self.deserialize_object,
+            "[]": self.deserialize_iter,
+            "{}": self.deserialize_dict,
+        }
+        self.deserialize_expected_types = {
+            "duration": (isodate.Duration, datetime.timedelta),
+            "iso-8601": (datetime.datetime),
+        }
+        self.dependencies: Dict[str, Type[ModelType]] = dict(classes) if classes else {}
+        self.key_extractors = [rest_key_extractor, xml_key_extractor]
+        # Additional properties only works if the "rest_key_extractor" is used to
+        # extract the keys. Making it work with any key extractor would be too
+        # complicated, with no real scenario for now.
+        # So we add a flag to disable additional properties detection. This flag should
+        # be used if you expect the deserialization to NOT come from a JSON REST syntax.
+        # Otherwise, results are unexpected.
+        self.additional_properties_detection = True
+
+    def __call__(self, target_obj, response_data, content_type=None):
+        """Call the deserializer to process a REST response.
+
+        :param str target_obj: Target data type to deserialize to.
+        :param requests.Response response_data: REST response object.
+        :param str content_type: Swagger "produces" if available.
+        :raises: DeserializationError if deserialization fails.
+        :return: Deserialized object.
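+
+        A minimal sketch of direct usage ("Cat" is a hypothetical model;
+        response_data normally comes from the pipeline):
+
+        .. code:: python
+
+            deserializer = Deserializer({"Cat": Cat})
+            cat = deserializer("Cat", '{"name": "Felix"}', content_type="application/json")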
+ """ + data = self._unpack_content(response_data, content_type) + return self._deserialize(target_obj, data) + + def _deserialize(self, target_obj, data): + """Call the deserializer on a model. + + Data needs to be already deserialized as JSON or XML ElementTree + + :param str target_obj: Target data type to deserialize to. + :param object data: Object to deserialize. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + # This is already a model, go recursive just in case + if hasattr(data, "_attribute_map"): + constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] + try: + for attr, mapconfig in data._attribute_map.items(): + if attr in constants: + continue + value = getattr(data, attr) + if value is None: + continue + local_type = mapconfig["type"] + internal_data_type = local_type.strip("[]{}") + if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): + continue + setattr(data, attr, self._deserialize(local_type, value)) + return data + except AttributeError: + return + + response, class_name = self._classify_target(target_obj, data) + + if isinstance(response, basestring): + return self.deserialize_data(data, response) + elif isinstance(response, type) and issubclass(response, Enum): + return self.deserialize_enum(data, response) + + if data is None: + return data + try: + attributes = response._attribute_map # type: ignore + d_attrs = {} + for attr, attr_desc in attributes.items(): + # Check empty string. If it's not empty, someone has a real "additionalProperties"... + if attr == "additional_properties" and attr_desc["key"] == "": + continue + raw_value = None + # Enhance attr_desc with some dynamic data + attr_desc = attr_desc.copy() # Do a copy, do not change the real one + internal_data_type = attr_desc["type"].strip("[]{}") + if internal_data_type in self.dependencies: + attr_desc["internalType"] = self.dependencies[internal_data_type] + + for key_extractor in self.key_extractors: + found_value = key_extractor(attr, attr_desc, data) + if found_value is not None: + if raw_value is not None and raw_value != found_value: + msg = ( + "Ignoring extracted value '%s' from %s for key '%s'" + " (duplicate extraction, follow extractors order)" + ) + _LOGGER.warning(msg, found_value, key_extractor, attr) + continue + raw_value = found_value + + value = self.deserialize_data(raw_value, attr_desc["type"]) + d_attrs[attr] = value + except (AttributeError, TypeError, KeyError) as err: + msg = "Unable to deserialize to object: " + class_name # type: ignore + raise_with_traceback(DeserializationError, msg, err) + else: + additional_properties = self._build_additional_properties(attributes, data) + return self._instantiate_model(response, d_attrs, additional_properties) + + def _build_additional_properties(self, attribute_map, data): + if not self.additional_properties_detection: + return None + if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": + # Check empty string. 
If it's not empty, someone has a real "additionalProperties" + return None + if isinstance(data, ET.Element): + data = {el.tag: el.text for el in data} + + known_keys = { + _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) + for desc in attribute_map.values() + if desc["key"] != "" + } + present_keys = set(data.keys()) + missing_keys = present_keys - known_keys + return {key: data[key] for key in missing_keys} + + def _classify_target(self, target, data): + """Check to see whether the deserialization target object can + be classified into a subclass. + Once classification has been determined, initialize object. + + :param str target: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + """ + if target is None: + return None, None + + if isinstance(target, basestring): + try: + target = self.dependencies[target] + except KeyError: + return target, target + + try: + target = target._classify(data, self.dependencies) + except AttributeError: + pass # Target is not a Model, no classify + return target, target.__class__.__name__ # type: ignore + + def failsafe_deserialize(self, target_obj, data, content_type=None): + """Ignores any errors encountered in deserialization, + and falls back to not deserializing the object. Recommended + for use in error deserialization, as we want to return the + HttpResponseError to users, and not have them deal with + a deserialization error. + + :param str target_obj: The target object type to deserialize to. + :param str/dict data: The response data to deserialize. + :param str content_type: Swagger "produces" if available. + """ + try: + return self(target_obj, data, content_type=content_type) + except: + _LOGGER.debug( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + @staticmethod + def _unpack_content(raw_data, content_type=None): + """Extract the correct structure for deserialization. + + If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. + if we can't, raise. Your Pipeline should have a RawDeserializer. + + If not a pipeline response and raw_data is bytes or string, use content-type + to decode it. If no content-type, try JSON. + + If raw_data is something else, bypass all logic and return it directly. + + :param raw_data: Data to be processed. + :param content_type: How to parse if raw_data is a string/bytes. + :raises JSONDecodeError: If JSON is requested and parsing is impossible. + :raises UnicodeDecodeError: If bytes is not UTF8 + """ + # Assume this is enough to detect a Pipeline Response without importing it + context = getattr(raw_data, "context", {}) + if context: + if RawDeserializer.CONTEXT_NAME in context: + return context[RawDeserializer.CONTEXT_NAME] + raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") + + # Assume this is enough to recognize universal_http.ClientResponse without importing it + if hasattr(raw_data, "body"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) + + # Assume this enough to recognize requests.Response without importing it. 
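+        # (The checks above and below are duck-typing on attribute names, so no
+        # transport package has to be imported here; which attribute identifies
+        # which response type is an upstream assumption, not verified here.)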
+ if hasattr(raw_data, "_content_consumed"): + return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) + + if isinstance(raw_data, (basestring, bytes)) or hasattr(raw_data, "read"): + return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore + return raw_data + + def _instantiate_model(self, response, attrs, additional_properties=None): + """Instantiate a response model passing in deserialized args. + + :param response: The response model class. + :param d_attrs: The deserialized response attributes. + """ + if callable(response): + subtype = getattr(response, "_subtype_map", {}) + try: + readonly = [k for k, v in response._validation.items() if v.get("readonly")] + const = [k for k, v in response._validation.items() if v.get("constant")] + kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} + response_obj = response(**kwargs) + for attr in readonly: + setattr(response_obj, attr, attrs.get(attr)) + if additional_properties: + response_obj.additional_properties = additional_properties + return response_obj + except TypeError as err: + msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore + raise DeserializationError(msg + str(err)) + else: + try: + for attr, value in attrs.items(): + setattr(response, attr, value) + return response + except Exception as exp: + msg = "Unable to populate response model. " + msg += "Type: {}, Error: {}".format(type(response), exp) + raise DeserializationError(msg) + + def deserialize_data(self, data, data_type): + """Process data for deserialization according to data type. + + :param str data: The response string to be deserialized. + :param str data_type: The type to deserialize to. + :raises: DeserializationError if deserialization fails. + :return: Deserialized object. + """ + if data is None: + return data + + try: + if not data_type: + return data + if data_type in self.basic_types.values(): + return self.deserialize_basic(data, data_type) + if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data + + is_a_text_parsing_type = lambda x: x not in ["object", "[]", r"{}"] + if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: + return None + data_val = self.deserialize_type[data_type](data) + return data_val + + iter_type = data_type[0] + data_type[-1] + if iter_type in self.deserialize_type: + return self.deserialize_type[iter_type](data, data_type[1:-1]) + + obj_type = self.dependencies[data_type] + if issubclass(obj_type, Enum): + if isinstance(data, ET.Element): + data = data.text + return self.deserialize_enum(data, obj_type) + + except (ValueError, TypeError, AttributeError) as err: + msg = "Unable to deserialize response data." + msg += " Data: {}, {}".format(data, data_type) + raise_with_traceback(DeserializationError, msg, err) + else: + return self._deserialize(obj_type, data) + + def deserialize_iter(self, attr, iter_type): + """Deserialize an iterable. + + :param list attr: Iterable to be deserialized. + :param str iter_type: The type of object in the iterable. 
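+
+        For example (a hedged sketch): deserialize_iter(["1", "2"], "int")
+        returns [1, 2].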
+ :rtype: list + """ + if attr is None: + return None + if isinstance(attr, ET.Element): # If I receive an element here, get the children + attr = list(attr) + if not isinstance(attr, (list, set)): + raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) + return [self.deserialize_data(a, iter_type) for a in attr] + + def deserialize_dict(self, attr, dict_type): + """Deserialize a dictionary. + + :param dict/list attr: Dictionary to be deserialized. Also accepts + a list of key, value pairs. + :param str dict_type: The object type of the items in the dictionary. + :rtype: dict + """ + if isinstance(attr, list): + return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} + + if isinstance(attr, ET.Element): + # Transform value into {"Key": "value"} + attr = {el.tag: el.text for el in attr} + return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} + + def deserialize_object(self, attr, **kwargs): + """Deserialize a generic object. + This will be handled as a dictionary. + + :param dict attr: Dictionary to be deserialized. + :rtype: dict + :raises: TypeError if non-builtin datatype encountered. + """ + if attr is None: + return None + if isinstance(attr, ET.Element): + # Do no recurse on XML, just return the tree as-is + return attr + if isinstance(attr, basestring): + return self.deserialize_basic(attr, "str") + obj_type = type(attr) + if obj_type in self.basic_types: + return self.deserialize_basic(attr, self.basic_types[obj_type]) + if obj_type is _long_type: + return self.deserialize_long(attr) + + if obj_type == dict: + deserialized = {} + for key, value in attr.items(): + try: + deserialized[key] = self.deserialize_object(value, **kwargs) + except ValueError: + deserialized[key] = None + return deserialized + + if obj_type == list: + deserialized = [] + for obj in attr: + try: + deserialized.append(self.deserialize_object(obj, **kwargs)) + except ValueError: + pass + return deserialized + + else: + error = "Cannot deserialize generic object with type: " + raise TypeError(error + str(obj_type)) + + def deserialize_basic(self, attr, data_type): + """Deserialize basic builtin data type from string. + Will attempt to convert to str, int, float and bool. + This function will also accept '1', '0', 'true' and 'false' as + valid bool values. + + :param str attr: response string to be deserialized. + :param str data_type: deserialization data type. + :rtype: str, int, float or bool + :raises: TypeError if string format is not valid. + """ + # If we're here, data is supposed to be a basic type. + # If it's still an XML node, take the text + if isinstance(attr, ET.Element): + attr = attr.text + if not attr: + if data_type == "str": + # None or '', node is empty string. + return "" + else: + # None or '', node with a strong type is None. + # Don't try to model "empty bool" or "empty int" + return None + + if data_type == "bool": + if attr in [True, False, 1, 0]: + return bool(attr) + elif isinstance(attr, basestring): + if attr.lower() in ["true", "1"]: + return True + elif attr.lower() in ["false", "0"]: + return False + raise TypeError("Invalid boolean value: {}".format(attr)) + + if data_type == "str": + return self.deserialize_unicode(attr) + return eval(data_type)(attr) # nosec + + @staticmethod + def deserialize_unicode(data): + """Preserve unicode objects in Python 2, otherwise return data + as a string. + + :param str data: response string to be deserialized. 
+ :rtype: str or unicode + """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string + try: + if isinstance(data, unicode): # type: ignore + return data + except NameError: + return str(data) + else: + return str(data) + + @staticmethod + def deserialize_enum(data, enum_obj): + """Deserialize string into enum object. + + If the string is not a valid enum value it will be returned as-is + and a warning will be logged. + + :param str data: Response string to be deserialized. If this value is + None or invalid it will be returned as-is. + :param Enum enum_obj: Enum object to deserialize to. + :rtype: Enum + """ + if isinstance(data, enum_obj) or data is None: + return data + if isinstance(data, Enum): + data = data.value + if isinstance(data, int): + # Workaround. We might consider remove it in the future. + # https://github.com/Azure/azure-rest-api-specs/issues/141 + try: + return list(enum_obj.__members__.values())[data] + except IndexError: + error = "{!r} is not a valid index for enum {!r}" + raise DeserializationError(error.format(data, enum_obj)) + try: + return enum_obj(str(data)) + except ValueError: + for enum_value in enum_obj: + if enum_value.value.lower() == str(data).lower(): + return enum_value + # We don't fail anymore for unknown value, we deserialize as a string + _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) + return Deserializer.deserialize_unicode(data) + + @staticmethod + def deserialize_bytearray(attr): + """Deserialize string into bytearray. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return bytearray(b64decode(attr)) # type: ignore + + @staticmethod + def deserialize_base64(attr): + """Deserialize base64 encoded string into string. + + :param str attr: response string to be deserialized. + :rtype: bytearray + :raises: TypeError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return b64decode(encoded) + + @staticmethod + def deserialize_decimal(attr): + """Deserialize string into Decimal object. + + :param str attr: response string to be deserialized. + :rtype: Decimal + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + return decimal.Decimal(attr) # type: ignore + except decimal.DecimalException as err: + msg = "Invalid decimal {}".format(attr) + raise_with_traceback(DeserializationError, msg, err) + + @staticmethod + def deserialize_long(attr): + """Deserialize string into long (Py2) or int (Py3). + + :param str attr: response string to be deserialized. + :rtype: long or int + :raises: ValueError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + return _long_type(attr) # type: ignore + + @staticmethod + def deserialize_duration(attr): + """Deserialize ISO-8601 formatted string into TimeDelta object. + + :param str attr: response string to be deserialized. + :rtype: TimeDelta + :raises: DeserializationError if string format invalid. 
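+
+        For example, "P1DT2H" parses to datetime.timedelta(days=1, seconds=7200).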
+ """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + duration = isodate.parse_duration(attr) + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize duration object." + raise_with_traceback(DeserializationError, msg, err) + else: + return duration + + @staticmethod + def deserialize_date(attr): + """Deserialize ISO-8601 formatted string into Date object. + + :param str attr: response string to be deserialized. + :rtype: Date + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) + + @staticmethod + def deserialize_time(attr): + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore + raise DeserializationError("Date must have only digits and -. Received: %s" % attr) + return isodate.parse_time(attr) + + @staticmethod + def deserialize_rfc(attr): + """Deserialize RFC-1123 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + parsed_date = email.utils.parsedate_tz(attr) # type: ignore + date_obj = datetime.datetime( + *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) + ) + if not date_obj.tzinfo: + date_obj = date_obj.astimezone(tz=TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to rfc datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_iso(attr): + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: Datetime + :raises: DeserializationError if string format invalid. + """ + if isinstance(attr, ET.Element): + attr = attr.text + try: + attr = attr.upper() # type: ignore + match = Deserializer.valid_date.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + except (ValueError, OverflowError, AttributeError) as err: + msg = "Cannot deserialize datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj + + @staticmethod + def deserialize_unix(attr): + """Serialize Datetime object into IntTime format. + This is represented as seconds. + + :param int attr: Object to be serialized. 
+ :rtype: Datetime + :raises: DeserializationError if format invalid + """ + if isinstance(attr, ET.Element): + attr = int(attr.text) # type: ignore + try: + date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) + except ValueError as err: + msg = "Cannot deserialize to unix datetime object." + raise_with_traceback(DeserializationError, msg, err) + else: + return date_obj diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_vendor.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_vendor.py new file mode 100644 index 00000000000..bd0df84f531 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/_vendor.py @@ -0,0 +1,30 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from typing import List, cast + +from azure.core.pipeline.transport import HttpRequest + + +def _convert_request(request, files=None): + data = request.content if not files else None + request = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=data) + if files: + request.set_formdata_body(files) + return request + + +def _format_url_section(template, **kwargs): + components = template.split("/") + while components: + try: + return template.format(**kwargs) + except KeyError as key: + # Need the cast, as for some reasons "split" is typed as list[str | Any] + formatted_components = cast(List[str], template.split("/")) + components = [c for c in formatted_components if "{}".format(key.args[0]) not in c] + template = "/".join(components) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/__init__.py new file mode 100644 index 00000000000..0b86b0a3392 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/__init__.py @@ -0,0 +1,23 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._azure_file_storage import AzureFileStorage + +try: + from ._patch import __all__ as _patch_all + from ._patch import * # pylint: disable=unused-wildcard-import +except ImportError: + _patch_all = [] +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AzureFileStorage", +] +__all__.extend([p for p in _patch_all if p not in __all__]) + +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_azure_file_storage.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_azure_file_storage.py new file mode 100644 index 00000000000..3201f13bfc8 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_azure_file_storage.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, Optional, Union + +from azure.core import AsyncPipelineClient +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .. import models as _models +from .._serialization import Deserializer, Serializer +from ._configuration import AzureFileStorageConfiguration +from .operations import DirectoryOperations, FileOperations, ServiceOperations, ShareOperations + + +class AzureFileStorage: # pylint: disable=client-accepts-api-version-keyword + """AzureFileStorage. + + :ivar service: ServiceOperations operations + :vartype service: azure.storage.fileshare.aio.operations.ServiceOperations + :ivar share: ShareOperations operations + :vartype share: azure.storage.fileshare.aio.operations.ShareOperations + :ivar directory: DirectoryOperations operations + :vartype directory: azure.storage.fileshare.aio.operations.DirectoryOperations + :ivar file: FileOperations operations + :vartype file: azure.storage.fileshare.aio.operations.FileOperations + :param url: The URL of the service account, share, directory or file that is the target of the + desired operation. Required. + :type url: str + :param base_url: Service URL. Required. Default value is "". + :type base_url: str + :param file_request_intent: Valid value is backup. "backup" Default value is None. + :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent + :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + Default value is None. + :type allow_trailing_dot: bool + :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source + URI. Default value is None. + :type allow_source_trailing_dot: bool + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2022-11-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes + downloaded from the source url into the specified range. Default value is "update". 
Note that + overriding this default value may result in unsupported behavior. + :paramtype file_range_write_from_url: str + """ + + def __init__( # pylint: disable=missing-client-constructor-parameter-credential + self, + url: str, + base_url: str = "", + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + **kwargs: Any + ) -> None: + self._config = AzureFileStorageConfiguration( + url=url, + file_request_intent=file_request_intent, + allow_trailing_dot=allow_trailing_dot, + allow_source_trailing_dot=allow_source_trailing_dot, + **kwargs + ) + self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) + + client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} + self._serialize = Serializer(client_models) + self._deserialize = Deserializer(client_models) + self._serialize.client_side_validation = False + self.service = ServiceOperations(self._client, self._config, self._serialize, self._deserialize) + self.share = ShareOperations(self._client, self._config, self._serialize, self._deserialize) + self.directory = DirectoryOperations(self._client, self._config, self._serialize, self._deserialize) + self.file = FileOperations(self._client, self._config, self._serialize, self._deserialize) + + def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client._send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + request_copy.url = self._client.format_url(request_copy.url) + return self._client.send_request(request_copy, **kwargs) + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> "AzureFileStorage": + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_configuration.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_configuration.py new file mode 100644 index 00000000000..15e54f80705 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_configuration.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +import sys +from typing import Any, Optional, Union + +from azure.core.configuration import Configuration +from azure.core.pipeline import policies + +from .. import models as _models + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports + +VERSION = "unknown" + + +class AzureFileStorageConfiguration(Configuration): # pylint: disable=too-many-instance-attributes + """Configuration for AzureFileStorage. + + Note that all parameters used to create this instance are saved as instance + attributes. + + :param url: The URL of the service account, share, directory or file that is the target of the + desired operation. Required. + :type url: str + :param file_request_intent: Valid value is backup. "backup" Default value is None. + :type file_request_intent: str or ~azure.storage.fileshare.models.ShareTokenIntent + :param allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + Default value is None. + :type allow_trailing_dot: bool + :param allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source + URI. Default value is None. + :type allow_source_trailing_dot: bool + :keyword version: Specifies the version of the operation to use for this request. Default value + is "2022-11-02". Note that overriding this default value may result in unsupported behavior. + :paramtype version: str + :keyword file_range_write_from_url: Only update is supported: - Update: Writes the bytes + downloaded from the source url into the specified range. Default value is "update". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype file_range_write_from_url: str + """ + + def __init__( + self, + url: str, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + **kwargs: Any + ) -> None: + super(AzureFileStorageConfiguration, self).__init__(**kwargs) + version: Literal["2022-11-02"] = kwargs.pop("version", "2022-11-02") + file_range_write_from_url: Literal["update"] = kwargs.pop("file_range_write_from_url", "update") + + if url is None: + raise ValueError("Parameter 'url' must not be None.") + + self.url = url + self.file_request_intent = file_request_intent + self.allow_trailing_dot = allow_trailing_dot + self.allow_source_trailing_dot = allow_source_trailing_dot + self.version = version + self.file_range_write_from_url = file_range_write_from_url + kwargs.setdefault("sdk_moniker", "azurefilestorage/{}".format(VERSION)) + self._configure(**kwargs) + + def _configure(self, **kwargs: Any) -> None: + self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) + self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) + self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) + self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) + self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) + self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) + self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) + self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) + self.authentication_policy = kwargs.get("authentication_policy") diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_patch.py new file mode 100644 index 00000000000..f99e77fef98 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/_patch.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+# +# -------------------------------------------------------------------------- + +# This file is used for handwritten extensions to the generated code. Example: +# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md +def patch_sdk(): + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/__init__.py new file mode 100644 index 00000000000..5e0376c1c30 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._share_operations import ShareOperations +from ._directory_operations import DirectoryOperations +from ._file_operations import FileOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ServiceOperations", + "ShareOperations", + "DirectoryOperations", + "FileOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_directory_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_directory_operations.py new file mode 100644 index 00000000000..f2bd09af933 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_directory_operations.py @@ -0,0 +1,1057 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import models as _models +from ..._vendor import _convert_request +from ...operations._directory_operations import ( + build_create_request, + build_delete_request, + build_force_close_handles_request, + build_get_properties_request, + build_list_files_and_directories_segment_request, + build_list_handles_request, + build_rename_request, + build_set_metadata_request, + build_set_properties_request, +) + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class DirectoryOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s + :attr:`directory` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + **kwargs: Any + ) -> None: + """Creates a new directory under the specified share or parent directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". 
+ :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + 
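+        # NOTE (editorial, illustrative - not part of the generated code): the
+        # x-ms-file-* headers deserialized here carry the SMB properties the service
+        # applied to the new directory. create() itself returns None, so a caller
+        # that needs these values can pass a `cls` callback, which receives
+        # (pipeline_response, deserialized, response_headers), e.g.:
+        #     headers = await client.directory.create(
+        #         cls=lambda pipeline_response, deserialized, headers: headers
+        #     )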
response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def get_properties( # pylint: disable=inconsistent-return-statements + self, sharesnapshot: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Returns all system properties for the specified directory, and can also be used to check the + existence of a directory. The data returned does not include the files in the directory or any + subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + 
response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Removes the specified empty directory. Note that the directory must be empty before it can be + deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, 
response_headers) + + delete.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def set_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets properties on the directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": 
"{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def set_metadata( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any + ) -> None: + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def list_files_and_directories_segment( + self, + prefix: Optional[str] = None, + sharesnapshot: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + 
timeout: Optional[int] = None, + include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None, + include_extended_info: Optional[bool] = None, + **kwargs: Any + ) -> _models.ListFilesAndDirectoriesSegmentResponse: + """Returns a list of files or directories under the specified share or directory. It lists the + contents only for a single level of the directory hierarchy. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. Default value is None. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] + :param include_extended_info: Include extended information. Default value is None. + :type include_extended_info: bool + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListFilesAndDirectoriesSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + cls: ClsType[_models.ListFilesAndDirectoriesSegmentResponse] = kwargs.pop("cls", None) + + request = build_list_files_and_directories_segment_request( + url=self._config.url, + prefix=prefix, + sharesnapshot=sharesnapshot, + marker=marker, + maxresults=maxresults, + timeout=timeout, + include=include, + include_extended_info=include_extended_info, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_files_and_directories_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListFilesAndDirectoriesSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_files_and_directories_segment.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> _models.ListHandlesResponse: + """Lists handles for directory. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. 
If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. Default value is None. + :type recursive: bool + :keyword comp: comp. Default value is "listhandles". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None) + + request = build_list_handles_request( + url=self._config.url, + marker=marker, + maxresults=maxresults, + timeout=timeout, + sharesnapshot=sharesnapshot, + recursive=recursive, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.list_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListHandlesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_handles.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def force_close_handles( # pylint: disable=inconsistent-return-statements + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + 
recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Closes all handles open for given directory. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. Required. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. Default value is None. + :type recursive: bool + :keyword comp: comp. Default value is "forceclosehandles". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_force_close_handles_request( + url=self._config.url, + handle_id=handle_id, + timeout=timeout, + marker=marker, + sharesnapshot=sharesnapshot, + recursive=recursive, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.force_close_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker")) + 
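+        # NOTE (editorial, illustrative - not part of the generated code):
+        # x-ms-marker is the continuation token for this operation. A non-empty
+        # value means not every handle was closed by this call, and the request
+        # should be repeated with marker=<returned value> until it comes back
+        # empty, e.g.:
+        #     marker = None
+        #     while True:
+        #         headers = await client.directory.force_close_handles(
+        #             handle_id="*",  # '*' is the close-all wildcard, per the docstring
+        #             marker=marker,
+        #             cls=lambda resp, deserialized, hdrs: hdrs,
+        #         )
+        #         marker = headers.get("x-ms-marker")
+        #         if not marker:
+        #             break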
response_headers["x-ms-number-of-handles-closed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-closed") + ) + response_headers["x-ms-number-of-handles-failed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-failed") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace_async + async def rename( # pylint: disable=inconsistent-return-statements + self, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None, + destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + **kwargs: Any + ) -> None: + """Renames a directory. + + :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in + length. Required. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param replace_if_exists: Optional. A boolean value for if the destination file already exists, + whether this request will overwrite the file or not. If true, the rename will succeed and will + overwrite the destination file. If not provided or if false and the destination file does + exist, the request will not overwrite the destination file. If provided and the destination + file doesn’t exist, the rename will succeed. Note: This value does not override the + x-ms-file-copy-ignore-read-only header value. Default value is None. + :type replace_if_exists: bool + :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly + attribute on a preexisting destination file should be respected. If true, the rename will + succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will + cause the rename to fail. Default value is None. + :type ignore_read_only: bool + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param source_lease_access_conditions: Parameter group. Default value is None. + :type source_lease_access_conditions: + ~azure.storage.fileshare.models.SourceLeaseAccessConditions + :param destination_lease_access_conditions: Parameter group. Default value is None. 
+ :type destination_lease_access_conditions: + ~azure.storage.fileshare.models.DestinationLeaseAccessConditions + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "rename". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_lease_id = None + _destination_lease_id = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + if source_lease_access_conditions is not None: + _source_lease_id = source_lease_access_conditions.source_lease_id + if destination_lease_access_conditions is not None: + _destination_lease_id = destination_lease_access_conditions.destination_lease_id + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + + request = build_rename_request( + url=self._config.url, + rename_source=rename_source, + timeout=timeout, + replace_if_exists=replace_if_exists, + ignore_read_only=ignore_read_only, + source_lease_id=_source_lease_id, + destination_lease_id=_destination_lease_id, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + file_permission=file_permission, + file_permission_key=file_permission_key, + metadata=metadata, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.rename.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + 
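+        # NOTE (editorial, illustrative - not part of the generated code): these
+        # response headers (ETag above, Last-Modified and the x-ms-file-* values
+        # below) describe the directory at its destination path; after a
+        # successful rename the source entry no longer exists.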
response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {"url": "{url}/{shareName}/{directory}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_file_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_file_operations.py new file mode 100644 index 00000000000..144286a9b93 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_file_operations.py @@ -0,0 +1,2226 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, AsyncIterator, Callable, Dict, IO, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... 
import models as _models +from ..._vendor import _convert_request +from ...operations._file_operations import ( + build_abort_copy_request, + build_acquire_lease_request, + build_break_lease_request, + build_change_lease_request, + build_create_request, + build_delete_request, + build_download_request, + build_force_close_handles_request, + build_get_properties_request, + build_get_range_list_request, + build_list_handles_request, + build_release_lease_request, + build_rename_request, + build_set_http_headers_request, + build_set_metadata_request, + build_start_copy_request, + build_upload_range_from_url_request, + build_upload_range_request, +) + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class FileOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s + :attr:`file` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + file_content_length: int, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Creates a new file or replaces a file. Note it only initializes the file with no content. + + :param file_content_length: Specifies the maximum size for the file, up to 4 TB. Required. + :type file_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. 
+ :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword file_type_constant: Dummy constant parameter, file type can only be file. Default + value is "file". Note that overriding this default value may result in unsupported behavior. + :paramtype file_type_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_cache_control = file_http_headers.file_cache_control + _file_content_disposition = file_http_headers.file_content_disposition + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_type = file_http_headers.file_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_create_request( + url=self._config.url, + file_content_length=file_content_length, + timeout=timeout, + file_content_type=_file_content_type, + file_content_encoding=_file_content_encoding, + file_content_language=_file_content_language, + file_cache_control=_file_cache_control, + file_content_md5=_file_content_md5, + file_content_disposition=_file_content_disposition, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + file_type_constant=file_type_constant, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, 
+ params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def download( + self, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> AsyncIterator[bytes]: + """Reads or downloads a file from the system, including its metadata and properties. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param range: Return file data only from the specified byte range. Default value is None. + :type range: str + :param range_get_content_md5: When this header is set to true and specified together with the + Range header, the service returns the MD5 hash for the range, as long as the range is less than + or equal to 4 MB in size. Default value is None. + :type range_get_content_md5: bool + :param lease_access_conditions: Parameter group. Default value is None. 
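Continuing with the hypothetical `client` from the `create` sketch, consuming `download` looks like the following. The return value is an async iterator of bytes, so the body is drained chunk by chunk, and the `range` string uses the `bytes=startByte-endByte` form described below:

# Inside the same async context as the create sketch.
stream = await client.file.download(range="bytes=0-511", range_get_content_md5=True)
data = bytearray()
async for chunk in stream:  # AsyncIterator[bytes]
    data.extend(chunk)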
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Async iterator of the response bytes or the result of cls(response) + :rtype: AsyncIterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[AsyncIterator[bytes]] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_download_request( + url=self._config.url, + timeout=timeout, + range=range, + range_get_content_md5=range_get_content_md5, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.download.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = 
self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-md5") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize( + "str", response.headers.get("x-ms-file-parent-id") + ) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = 
self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-md5") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize( + "str", response.headers.get("x-ms-file-parent-id") + ) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + download.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def get_properties( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata, standard HTTP properties, and system properties for the + file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-type"] = self._deserialize("str", response.headers.get("x-ms-type")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition")) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = 
self._deserialize("str", response.headers.get("x-ms-copy-progress")) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def set_http_headers( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + file_content_length: Optional[int] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets HTTP headers on the file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If the specified byte value + is less than the current size of the file, then all ranges above the specified byte value are + cleared. Default value is None. + :type file_content_length: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. 
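Besides stamping headers, `set_http_headers` doubles as the resize operation through `file_content_length`. A sketch reusing the hypothetical `client`; the `FileHTTPHeaders` parameter-group model is assumed to be importable from the vendored `_generated.models` package:

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02._generated.models import (
    FileHTTPHeaders,
)

# Truncate (or extend) the file to 2 KiB and set a content type in one call.
await client.file.set_http_headers(
    file_content_length=2048,
    file_http_headers=FileHTTPHeaders(file_content_type="application/octet-stream"),
)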
Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_cache_control = file_http_headers.file_cache_control + _file_content_disposition = file_http_headers.file_content_disposition + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_type = file_http_headers.file_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_http_headers_request( + url=self._config.url, + timeout=timeout, + file_content_length=file_content_length, + file_content_type=_file_content_type, + file_content_encoding=_file_content_encoding, + file_content_language=_file_content_language, + file_cache_control=_file_cache_control, + file_content_md5=_file_content_md5, + file_content_disposition=_file_content_disposition, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + 
lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.set_http_headers.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. 
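A sketch of `set_metadata` with the hypothetical `client`. Like the other Azure Storage metadata setters, it replaces the file's whole metadata set rather than merging with existing pairs:

# Overwrites any metadata previously set on the file.
await client.file.set_metadata(metadata={"stage": "published", "owner": "aosm"})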
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. 
See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def release_lease( # pylint: disable=inconsistent-return-statements + self, lease_id: str, timeout: Optional[int] = None, 
request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": 
"{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + proposed_lease_id=proposed_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = 
self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def upload_range( # pylint: disable=inconsistent-return-statements + self, + range: str, + content_length: int, + timeout: Optional[int] = None, + file_range_write: Union[str, _models.FileRangeWriteType] = "update", + content_md5: Optional[bytes] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + optionalbody: Optional[IO] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the start and end of the range + must be specified. For an update operation, the range can be up to 4 MB in size. 
For a clear + operation, the range can be up to the value of the file's full size. The File service accepts + only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be + specified in the following format: bytes=startByte-endByte. Required. + :type range: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_range_write: Specify one of the following options: - Update: Writes the bytes + specified by the request body into the specified range. The Range and Content-Length headers + must match to perform the update. - Clear: Clears the specified range and releases the space + used in storage for that range. To clear a range, set the Content-Length header to zero, and + set the Range header to a value that indicates the range to clear, up to maximum file size. + Known values are: "update" and "clear". Default value is "update". + :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType + :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of + the data during transport. When the Content-MD5 header is specified, the File service compares + the hash of the content that has arrived with the header value that was sent. If the two hashes + do not match, the operation will fail with error code 400 (Bad Request). Default value is None. + :type content_md5: bytes + :param file_last_written_mode: If the file last write time should be preserved or overwritten. + Known values are: "Now" and "Preserve". Default value is None. + :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :param optionalbody: Initial data. Default value is None. + :type optionalbody: IO + :keyword comp: comp. Default value is "range". Note that overriding this default value may + result in unsupported behavior. 
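A sketch of writing and then clearing a range with `upload_range`, reusing the hypothetical `client`. The body travels in `optionalbody` (typed `IO`, hence the BytesIO wrapper), while a clear sends no body and a zero `content_length`:

from io import BytesIO

payload = b"hello, file share"
# Update: the Range and Content-Length must describe exactly the bytes sent.
await client.file.upload_range(
    range=f"bytes=0-{len(payload) - 1}",
    content_length=len(payload),
    optionalbody=BytesIO(payload),
)
# Clear: releases the range's storage; zero content_length, no body.
await client.file.upload_range(
    range=f"bytes=0-{len(payload) - 1}",
    content_length=0,
    file_range_write="clear",
)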
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + _content = optionalbody + + request = build_upload_range_request( + url=self._config.url, + range=range, + content_length=content_length, + timeout=timeout, + file_range_write=file_range_write, + content_md5=content_md5, + lease_id=_lease_id, + file_last_written_mode=file_last_written_mode, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.upload_range.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def upload_range_from_url( # pylint: disable=inconsistent-return-statements + self, + range: str, + copy_source: str, + content_length: int, + timeout: Optional[int] = None, + source_range: Optional[str] = None, + source_content_crc64: Optional[bytes] = None, + copy_source_authorization: Optional[str] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + 
source_modified_access_conditions: Optional[_models.SourceModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file where the contents are read from a URL. + + :param range: Writes data to the specified byte range in the file. Required. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. Required. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_content_crc64: bytes + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param file_last_written_mode: If the file last write time should be preserved or overwritten. + Known values are: "Now" and "Preserve". Default value is None. + :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "range". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_if_match_crc64 = None + _source_if_none_match_crc64 = None + _lease_id = None + if source_modified_access_conditions is not None: + _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_upload_range_from_url_request( + url=self._config.url, + range=range, + copy_source=copy_source, + content_length=content_length, + timeout=timeout, + source_range=source_range, + source_content_crc64=source_content_crc64, + source_if_match_crc64=_source_if_match_crc64, + source_if_none_match_crc64=_source_if_none_match_crc64, + lease_id=_lease_id, + copy_source_authorization=copy_source_authorization, + file_last_written_mode=file_last_written_mode, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + comp=comp, + file_range_write_from_url=self._config.file_range_write_from_url, + version=self._config.version, + template_url=self.upload_range_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range_from_url.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async 
def get_range_list( + self, + sharesnapshot: Optional[str] = None, + prevsharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.ShareFileRangeList: + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, + when present, specifies the previous snapshot. Default value is None. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, inclusively. Default + value is None. + :type range: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "rangelist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareFileRangeList or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist")) + cls: ClsType[_models.ShareFileRangeList] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_range_list_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + prevsharesnapshot=prevsharesnapshot, + timeout=timeout, + range=range, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.get_range_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-content-length"] = self._deserialize("int", 
response.headers.get("x-ms-content-length")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ShareFileRangeList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_range_list.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def start_copy( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_permission_copy_mode = None + _ignore_read_only = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + _set_archive_attribute = None + _lease_id = None + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + _ignore_read_only = copy_file_smb_info.ignore_read_only + _set_archive_attribute = copy_file_smb_info.set_archive_attribute + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_start_copy_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_permission_copy_mode=_file_permission_copy_mode, + ignore_read_only=_ignore_read_only, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + set_archive_attribute=_set_archive_attribute, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.start_copy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + 
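+        # Illustrative note: a 202 here means the copy was accepted, not finished.
+        # Callers typically keep the "x-ms-copy-id" and "x-ms-copy-status" header
+        # values deserialized above, poll the file's properties until the status is
+        # no longer "pending", and may pass the copy id to abort_copy() to cancel
+        # an in-flight copy.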
+ if cls: + return cls(pipeline_response, None, response_headers) + + start_copy.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def abort_copy( # pylint: disable=inconsistent-return-statements + self, + copy_id: str, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Aborts a pending Copy File operation, and leaves a destination file with zero length and full + metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + File operation. Required. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "copy". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword copy_action_abort_constant: Abort. Default value is "abort". Note that overriding this + default value may result in unsupported behavior. + :paramtype copy_action_abort_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy")) + copy_action_abort_constant: Literal["abort"] = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_abort_copy_request( + url=self._config.url, + copy_id=copy_id, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + copy_action_abort_constant=copy_action_abort_constant, + version=self._config.version, + template_url=self.abort_copy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = 
self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> _models.ListHandlesResponse: + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :keyword comp: comp. Default value is "listhandles". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None) + + request = build_list_handles_request( + url=self._config.url, + marker=marker, + maxresults=maxresults, + timeout=timeout, + sharesnapshot=sharesnapshot, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.list_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + 
response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListHandlesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_handles.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def force_close_handles( # pylint: disable=inconsistent-return-statements + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> None: + """Closes all handles open for given file. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. Required. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :keyword comp: comp. Default value is "forceclosehandles". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_force_close_handles_request( + url=self._config.url, + handle_id=handle_id, + timeout=timeout, + marker=marker, + sharesnapshot=sharesnapshot, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.force_close_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker")) + response_headers["x-ms-number-of-handles-closed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-closed") + ) + response_headers["x-ms-number-of-handles-failed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-failed") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace_async + async def rename( # pylint: disable=inconsistent-return-statements + self, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None, + destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + **kwargs: Any + ) -> None: + """Renames a file. + + :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in + length. Required. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param replace_if_exists: Optional. A boolean value for if the destination file already exists, + whether this request will overwrite the file or not. If true, the rename will succeed and will + overwrite the destination file. If not provided or if false and the destination file does + exist, the request will not overwrite the destination file. If provided and the destination + file doesn’t exist, the rename will succeed. Note: This value does not override the + x-ms-file-copy-ignore-read-only header value. Default value is None. + :type replace_if_exists: bool + :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly + attribute on a preexisting destination file should be respected. If true, the rename will + succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will + cause the rename to fail. Default value is None. + :type ignore_read_only: bool + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param source_lease_access_conditions: Parameter group. Default value is None. + :type source_lease_access_conditions: + ~azure.storage.fileshare.models.SourceLeaseAccessConditions + :param destination_lease_access_conditions: Parameter group. Default value is None. + :type destination_lease_access_conditions: + ~azure.storage.fileshare.models.DestinationLeaseAccessConditions + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :keyword comp: comp. Default value is "rename". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_lease_id = None + _destination_lease_id = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + _file_content_type = None + if source_lease_access_conditions is not None: + _source_lease_id = source_lease_access_conditions.source_lease_id + if destination_lease_access_conditions is not None: + _destination_lease_id = destination_lease_access_conditions.destination_lease_id + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + + request = build_rename_request( + url=self._config.url, + rename_source=rename_source, + timeout=timeout, + replace_if_exists=replace_if_exists, + ignore_read_only=ignore_read_only, + source_lease_id=_source_lease_id, + destination_lease_id=_destination_lease_id, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + file_permission=file_permission, + file_permission_key=file_permission_key, + metadata=metadata, + file_content_type=_file_content_type, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.rename.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", 
response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_service_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_service_operations.py new file mode 100644 index 00000000000..8bfccf8b747 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_service_operations.py @@ -0,0 +1,305 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._service_operations import ( + build_get_properties_request, + build_list_shares_segment_request, + build_set_properties_request, +) + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ServiceOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s + :attr:`service` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def set_properties( # pylint: disable=inconsistent-return-statements + self, storage_service_properties: _models.StorageServiceProperties, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Sets properties for a storage account's File service endpoint, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. Required. + :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}"} + + @distributed_trace_async + async def get_properties(self, timeout: Optional[int] = None, **kwargs: Any) -> _models.StorageServiceProperties: + """Gets the properties of a storage account's File service, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None) + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("StorageServiceProperties", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_properties.metadata = {"url": "{url}"} + + @distributed_trace_async + async def list_shares_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None, + timeout: Optional[int] = None, + **kwargs: Any + ) -> _models.ListSharesResponse: + """The List Shares Segment operation returns a list of the shares and share snapshots under the + specified account. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. 
+ :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + cls: ClsType[_models.ListSharesResponse] = kwargs.pop("cls", None) + + request = build_list_shares_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + comp=comp, + version=self._config.version, + template_url=self.list_shares_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("ListSharesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_shares_segment.metadata = {"url": "{url}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_share_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_share_operations.py new file mode 100644 index 00000000000..29f13694ebd --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/aio/operations/_share_operations.py @@ -0,0 +1,1762 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
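+#
+# Usage sketch (illustrative only; `client` is a stand-in, not defined in this
+# module): as the class docstring below notes, ShareOperations is not
+# instantiated directly but reached through the async AzureFileStorage client's
+# `share` attribute. Assuming `client` is such a client, scoped to the share:
+#
+#     await client.share.create(quota=5, access_tier="Hot")
+#     await client.share.get_properties()  # properties surface via response headers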
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import AsyncHttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._vendor import _convert_request +from ...operations._share_operations import ( + build_acquire_lease_request, + build_break_lease_request, + build_change_lease_request, + build_create_permission_request, + build_create_request, + build_create_snapshot_request, + build_delete_request, + build_get_access_policy_request, + build_get_permission_request, + build_get_properties_request, + build_get_statistics_request, + build_release_lease_request, + build_renew_lease_request, + build_restore_request, + build_set_access_policy_request, + build_set_metadata_request, + build_set_properties_request, +) + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class ShareOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.aio.AzureFileStorage`'s + :attr:`share` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + **kwargs: Any + ) -> None: + """Creates a new share under the specified account. If the share with the same name already + exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None. + :type quota: int + :param access_tier: Specifies the access tier of the share. Known values are: + "TransactionOptimized", "Hot", and "Cool". Default value is None. 
+ :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param enabled_protocols: Protocols to enable on the share. Default value is None. + :type enabled_protocols: str + :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values + are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None. + :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + enabled_protocols=enabled_protocols, + root_squash=root_squash, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def get_properties( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata and system properties for the specified share or share + snapshot. The data returned does not include the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota")) + response_headers["x-ms-share-provisioned-iops"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-iops") + ) + response_headers["x-ms-share-provisioned-ingress-mbps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-ingress-mbps") + ) + response_headers["x-ms-share-provisioned-egress-mbps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-egress-mbps") + ) + response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time") + ) + response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps") + ) + 
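# The remaining response headers surface the share's lease, access-tier and protocol state. + 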
response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-access-tier"] = self._deserialize("str", response.headers.get("x-ms-access-tier")) + response_headers["x-ms-access-tier-change-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-access-tier-change-time") + ) + response_headers["x-ms-access-tier-transition-state"] = self._deserialize( + "str", response.headers.get("x-ms-access-tier-transition-state") + ) + response_headers["x-ms-enabled-protocols"] = self._deserialize( + "str", response.headers.get("x-ms-enabled-protocols") + ) + response_headers["x-ms-root-squash"] = self._deserialize("str", response.headers.get("x-ms-root-squash")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def delete( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Operation marks the specified share or share snapshot for deletion. The share or share snapshot + and any files contained within it are later deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the base share and all of its + snapshots. Known values are: "include" and "include-leased". Default value is None. + :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_delete_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + delete_snapshots=delete_snapshots, + lease_id=_lease_id, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. 
+ :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def release_lease( # pylint: disable=inconsistent-return-statements + 
self, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + 
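# A successful release returns 200; the remaining headers are standard request-tracking metadata. + 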
response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + proposed_lease_id=proposed_lease_id, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. 
Default value is None. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] 
= None, + break_period: Optional[int] = None, + request_id_parameter: Optional[str] = None, + sharesnapshot: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + sharesnapshot=sharesnapshot, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def create_snapshot( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any + ) -> None: + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :keyword restype: restype. Default value is "share". 
Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "snapshot". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_snapshot_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.create_snapshot.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {"url": "{url}/{shareName}"} + + @overload + async def create_permission( # pylint: disable=inconsistent-return-statements + self, + share_permission: _models.SharePermission, + timeout: Optional[int] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Required. + :type share_permission: ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_permission( # pylint: disable=inconsistent-return-statements + self, + share_permission: IO, + timeout: Optional[int] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Required. + :type share_permission: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_permission( # pylint: disable=inconsistent-return-statements + self, share_permission: Union[_models.SharePermission, IO], timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Is either a + SharePermission type or a IO type. Required. + :type share_permission: ~azure.storage.fileshare.models.SharePermission or IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(share_permission, (IO, bytes)): + _content = share_permission + else: + _json = self._serialize.body(share_permission, "SharePermission") + + request = build_create_permission_request( + url=self._config.url, + timeout=timeout, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + json=_json, + content=_content, + template_url=self.create_permission.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_permission.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def get_permission( + self, file_permission_key: str, timeout: Optional[int] = None, **kwargs: Any + ) -> _models.SharePermission: + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the directory/file. Required. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + cls: ClsType[_models.SharePermission] = kwargs.pop("cls", None) + + request = build_get_permission_request( + url=self._config.url, + file_permission_key=file_permission_key, + timeout=timeout, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_permission.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("SharePermission", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_permission.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def set_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets properties for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None. + :type quota: int + :param access_tier: Specifies the access tier of the share. Known values are: + "TransactionOptimized", "Hot", and "Cool". Default value is None. + :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values + are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None. 
+ :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + quota=quota, + access_tier=access_tier, + lease_id=_lease_id, + root_squash=root_squash, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. 
+ :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def get_access_policy( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> List[_models.SignedIdentifier]: + """Returns information about stored access policies specified on the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("[SignedIdentifier]", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_access_policy.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def set_access_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + share_acl: Optional[List[_models.SignedIdentifier]] = None, + **kwargs: Any + ) -> None: + """Sets a stored access policy for use with shared access signatures. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. 
Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :param share_acl: The ACL for the share. Default value is None. + :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True}} + if share_acl is not None: + _content = self._serialize.body( + share_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt + ) + else: + _content = None + + request = build_set_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def get_statistics( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.ShareStats: + """Retrieves statistics related to 
the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "stats". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareStats or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats")) + cls: ClsType[_models.ShareStats] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_statistics_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_statistics.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ShareStats", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_statistics.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace_async + async def restore( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_share_name: Optional[str] = None, + deleted_share_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """Restores a previously deleted Share. 
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param deleted_share_name: Specifies the name of the previously-deleted share. Default value is + None. + :type deleted_share_name: str + :param deleted_share_version: Specifies the version of the previously-deleted share. Default + value is None. + :type deleted_share_version: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_restore_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + deleted_share_name=deleted_share_name, + deleted_share_version=deleted_share_version, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.restore.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {"url": "{url}/{shareName}"} diff --git 
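The three generated operations above (set_access_policy, get_statistics, restore) are normally reached through the vendored convenience client rather than invoked directly. A minimal sketch of that path, assuming the vendored fileshare package mirrors the public azure-storage-fileshare layout (as the vendored blob import elsewhere in this diff suggests); the import paths, share name, and connection string are illustrative placeholders, not part of this change:

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import AccessPolicy
from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareClient

async def demo(conn_str: str) -> None:
    # conn_str and "demo-share" are hypothetical values for illustration only.
    share = ShareClient.from_connection_string(conn_str, share_name="demo-share")
    async with share:
        # Drives the generated set_access_policy operation above (restype="share", comp="acl").
        await share.set_share_access_policy(
            signed_identifiers={"read-only": AccessPolicy(permission="r")}
        )
        # Drives the generated get_statistics operation; returns share usage in bytes.
        usage = await share.get_share_stats()
        print(f"share usage: {usage} bytes")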
a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/__init__.py new file mode 100644 index 00000000000..b13a08ef345 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/__init__.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from ._models_py3 import AccessPolicy +from ._models_py3 import ClearRange +from ._models_py3 import CopyFileSmbInfo +from ._models_py3 import CorsRule +from ._models_py3 import DestinationLeaseAccessConditions +from ._models_py3 import DirectoryItem +from ._models_py3 import FileHTTPHeaders +from ._models_py3 import FileItem +from ._models_py3 import FileProperty +from ._models_py3 import FileRange +from ._models_py3 import FilesAndDirectoriesListSegment +from ._models_py3 import HandleItem +from ._models_py3 import LeaseAccessConditions +from ._models_py3 import ListFilesAndDirectoriesSegmentResponse +from ._models_py3 import ListHandlesResponse +from ._models_py3 import ListSharesResponse +from ._models_py3 import Metrics +from ._models_py3 import RetentionPolicy +from ._models_py3 import ShareFileRangeList +from ._models_py3 import ShareItemInternal +from ._models_py3 import SharePermission +from ._models_py3 import SharePropertiesInternal +from ._models_py3 import ShareProtocolSettings +from ._models_py3 import ShareSmbSettings +from ._models_py3 import ShareStats +from ._models_py3 import SignedIdentifier +from ._models_py3 import SmbMultichannel +from ._models_py3 import SourceLeaseAccessConditions +from ._models_py3 import SourceModifiedAccessConditions +from ._models_py3 import StorageError +from ._models_py3 import StorageServiceProperties +from ._models_py3 import StringEncoded + +from ._azure_file_storage_enums import CopyStatusType +from ._azure_file_storage_enums import DeleteSnapshotsOptionType +from ._azure_file_storage_enums import FileLastWrittenMode +from ._azure_file_storage_enums import FileRangeWriteType +from ._azure_file_storage_enums import LeaseDurationType +from ._azure_file_storage_enums import LeaseStateType +from ._azure_file_storage_enums import LeaseStatusType +from ._azure_file_storage_enums import ListFilesIncludeType +from ._azure_file_storage_enums import ListSharesIncludeType +from ._azure_file_storage_enums import PermissionCopyModeType +from ._azure_file_storage_enums import ShareAccessTier +from ._azure_file_storage_enums import ShareRootSquash +from ._azure_file_storage_enums import ShareTokenIntent +from ._azure_file_storage_enums import StorageErrorCode +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "AccessPolicy", + "ClearRange", + "CopyFileSmbInfo", + "CorsRule", + "DestinationLeaseAccessConditions", + "DirectoryItem", + "FileHTTPHeaders", + "FileItem", + "FileProperty", + "FileRange", + "FilesAndDirectoriesListSegment", + "HandleItem", + "LeaseAccessConditions", + 
"ListFilesAndDirectoriesSegmentResponse", + "ListHandlesResponse", + "ListSharesResponse", + "Metrics", + "RetentionPolicy", + "ShareFileRangeList", + "ShareItemInternal", + "SharePermission", + "SharePropertiesInternal", + "ShareProtocolSettings", + "ShareSmbSettings", + "ShareStats", + "SignedIdentifier", + "SmbMultichannel", + "SourceLeaseAccessConditions", + "SourceModifiedAccessConditions", + "StorageError", + "StorageServiceProperties", + "StringEncoded", + "CopyStatusType", + "DeleteSnapshotsOptionType", + "FileLastWrittenMode", + "FileRangeWriteType", + "LeaseDurationType", + "LeaseStateType", + "LeaseStatusType", + "ListFilesIncludeType", + "ListSharesIncludeType", + "PermissionCopyModeType", + "ShareAccessTier", + "ShareRootSquash", + "ShareTokenIntent", + "StorageErrorCode", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_azure_file_storage_enums.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_azure_file_storage_enums.py new file mode 100644 index 00000000000..aad6f1596ed --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_azure_file_storage_enums.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from enum import Enum +from azure.core import CaseInsensitiveEnumMeta + + +class CopyStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """CopyStatusType.""" + + PENDING = "pending" + SUCCESS = "success" + ABORTED = "aborted" + FAILED = "failed" + + +class DeleteSnapshotsOptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """DeleteSnapshotsOptionType.""" + + INCLUDE = "include" + INCLUDE_LEASED = "include-leased" + + +class FileLastWrittenMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """FileLastWrittenMode.""" + + NOW = "Now" + PRESERVE = "Preserve" + + +class FileRangeWriteType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """FileRangeWriteType.""" + + UPDATE = "update" + CLEAR = "clear" + + +class LeaseDurationType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """When a share is leased, specifies whether the lease is of infinite or fixed duration.""" + + INFINITE = "infinite" + FIXED = "fixed" + + +class LeaseStateType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Lease state of the share.""" + + AVAILABLE = "available" + LEASED = "leased" + EXPIRED = "expired" + BREAKING = "breaking" + BROKEN = "broken" + + +class LeaseStatusType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The current lease status of the share.""" + + LOCKED = "locked" + UNLOCKED = "unlocked" + + +class ListFilesIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ListFilesIncludeType.""" + + TIMESTAMPS = "Timestamps" + ETAG = "Etag" + ATTRIBUTES = "Attributes" + PERMISSION_KEY = "PermissionKey" + + +class ListSharesIncludeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ListSharesIncludeType.""" + + SNAPSHOTS = "snapshots" + METADATA = "metadata" + DELETED = "deleted" + + 
+class PermissionCopyModeType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """PermissionCopyModeType.""" + + SOURCE = "source" + OVERRIDE = "override" + + +class ShareAccessTier(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ShareAccessTier.""" + + TRANSACTION_OPTIMIZED = "TransactionOptimized" + HOT = "Hot" + COOL = "Cool" + + +class ShareRootSquash(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ShareRootSquash.""" + + NO_ROOT_SQUASH = "NoRootSquash" + ROOT_SQUASH = "RootSquash" + ALL_SQUASH = "AllSquash" + + +class ShareTokenIntent(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """ShareTokenIntent.""" + + BACKUP = "backup" + + +class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Error codes returned by the service.""" + + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB = "UnsupportedHttpVerb" + CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" + CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" + DELETE_PENDING = "DeletePending" + DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" + FILE_LOCK_CONFLICT = "FileLockConflict" + INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" + PARENT_NOT_FOUND = "ParentNotFound" + READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" + SHARE_ALREADY_EXISTS = "ShareAlreadyExists" + SHARE_BEING_DELETED = "ShareBeingDeleted" + SHARE_DISABLED = "ShareDisabled" + SHARE_NOT_FOUND = "ShareNotFound" + SHARING_VIOLATION = "SharingViolation" + SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" + SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" + 
SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" + SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" + CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" + AUTHORIZATION_SOURCE_IP_MISMATCH = "AuthorizationSourceIPMismatch" + AUTHORIZATION_PROTOCOL_MISMATCH = "AuthorizationProtocolMismatch" + AUTHORIZATION_PERMISSION_MISMATCH = "AuthorizationPermissionMismatch" + AUTHORIZATION_SERVICE_MISMATCH = "AuthorizationServiceMismatch" + AUTHORIZATION_RESOURCE_TYPE_MISMATCH = "AuthorizationResourceTypeMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_models_py3.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_models_py3.py new file mode 100644 index 00000000000..c61b66152be --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_models_py3.py @@ -0,0 +1,1619 @@ +# coding=utf-8 +# pylint: disable=too-many-lines +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +import datetime +from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union + +from .. import _serialization + +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from .. import models as _models + + +class AccessPolicy(_serialization.Model): + """An Access policy. + + :ivar start: The date-time the policy is active. + :vartype start: str + :ivar expiry: The date-time the policy expires. + :vartype expiry: str + :ivar permission: The permissions for the ACL policy. + :vartype permission: str + """ + + _attribute_map = { + "start": {"key": "Start", "type": "str"}, + "expiry": {"key": "Expiry", "type": "str"}, + "permission": {"key": "Permission", "type": "str"}, + } + + def __init__( + self, + *, + start: Optional[str] = None, + expiry: Optional[str] = None, + permission: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword start: The date-time the policy is active. + :paramtype start: str + :keyword expiry: The date-time the policy expires. + :paramtype expiry: str + :keyword permission: The permissions for the ACL policy. + :paramtype permission: str + """ + super().__init__(**kwargs) + self.start = start + self.expiry = expiry + self.permission = permission + + +class ClearRange(_serialization.Model): + """ClearRange. + + All required parameters must be populated in order to send to Azure. + + :ivar start: Required. + :vartype start: int + :ivar end: Required. + :vartype end: int + """ + + _validation = { + "start": {"required": True}, + "end": {"required": True}, + } + + _attribute_map = { + "start": {"key": "Start", "type": "int", "xml": {"name": "Start"}}, + "end": {"key": "End", "type": "int", "xml": {"name": "End"}}, + } + _xml_map = {"name": "ClearRange"} + + def __init__(self, *, start: int, end: int, **kwargs: Any) -> None: + """ + :keyword start: Required. + :paramtype start: int + :keyword end: Required. 
+ :paramtype end: int + """ + super().__init__(**kwargs) + self.start = start + self.end = end + + +class CopyFileSmbInfo(_serialization.Model): + """Parameter group. + + :ivar file_attributes: Specifies either the option to copy file attributes from a source + file(source) to a target file or a list of attributes to set on a target file. + :vartype file_attributes: str + :ivar file_creation_time: Specifies either the option to copy file creation time from a source + file(source) to a target file or a time value in ISO 8601 format to set as creation time on a + target file. + :vartype file_creation_time: str + :ivar file_last_write_time: Specifies either the option to copy file last write time from a + source file(source) to a target file or a time value in ISO 8601 format to set as last write + time on a target file. + :vartype file_last_write_time: str + :ivar file_change_time: Specifies either the option to copy file last write time from a source + file(source) to a target file or a time value in ISO 8601 format to set as last write time on a + target file. + :vartype file_change_time: str + :ivar file_permission_copy_mode: Specifies the option to copy file security descriptor from + source file or to set it using the value which is defined by the header value of + x-ms-file-permission or x-ms-file-permission-key. Known values are: "source" and "override". + :vartype file_permission_copy_mode: str or + ~azure.storage.fileshare.models.PermissionCopyModeType + :ivar ignore_read_only: Specifies the option to overwrite the target file if it already exists + and has read-only attribute set. + :vartype ignore_read_only: bool + :ivar set_archive_attribute: Specifies the option to set archive attribute on a target file. + True means archive attribute will be set on a target file despite attribute overrides or a + source file state. + :vartype set_archive_attribute: bool + """ + + _attribute_map = { + "file_attributes": {"key": "fileAttributes", "type": "str"}, + "file_creation_time": {"key": "fileCreationTime", "type": "str"}, + "file_last_write_time": {"key": "fileLastWriteTime", "type": "str"}, + "file_change_time": {"key": "fileChangeTime", "type": "str"}, + "file_permission_copy_mode": {"key": "filePermissionCopyMode", "type": "str"}, + "ignore_read_only": {"key": "ignoreReadOnly", "type": "bool"}, + "set_archive_attribute": {"key": "setArchiveAttribute", "type": "bool"}, + } + + def __init__( + self, + *, + file_attributes: Optional[str] = None, + file_creation_time: Optional[str] = None, + file_last_write_time: Optional[str] = None, + file_change_time: Optional[str] = None, + file_permission_copy_mode: Optional[Union[str, "_models.PermissionCopyModeType"]] = None, + ignore_read_only: Optional[bool] = None, + set_archive_attribute: Optional[bool] = None, + **kwargs: Any + ) -> None: + """ + :keyword file_attributes: Specifies either the option to copy file attributes from a source + file(source) to a target file or a list of attributes to set on a target file. + :paramtype file_attributes: str + :keyword file_creation_time: Specifies either the option to copy file creation time from a + source file(source) to a target file or a time value in ISO 8601 format to set as creation time + on a target file. + :paramtype file_creation_time: str + :keyword file_last_write_time: Specifies either the option to copy file last write time from a + source file(source) to a target file or a time value in ISO 8601 format to set as last write + time on a target file. 
+ :paramtype file_last_write_time: str + :keyword file_change_time: Specifies either the option to copy file last write time from a source + file(source) to a target file or a time value in ISO 8601 format to set as last write time on a + target file. + :paramtype file_change_time: str + :keyword file_permission_copy_mode: Specifies the option to copy file security descriptor from + source file or to set it using the value which is defined by the header value of + x-ms-file-permission or x-ms-file-permission-key. Known values are: "source" and "override". + :paramtype file_permission_copy_mode: str or + ~azure.storage.fileshare.models.PermissionCopyModeType + :keyword ignore_read_only: Specifies the option to overwrite the target file if it already + exists and has read-only attribute set. + :paramtype ignore_read_only: bool + :keyword set_archive_attribute: Specifies the option to set archive attribute on a target file. + True means archive attribute will be set on a target file despite attribute overrides or a + source file state. + :paramtype set_archive_attribute: bool + """ + super().__init__(**kwargs) + self.file_attributes = file_attributes + self.file_creation_time = file_creation_time + self.file_last_write_time = file_last_write_time + self.file_change_time = file_change_time + self.file_permission_copy_mode = file_permission_copy_mode + self.ignore_read_only = ignore_read_only + self.set_archive_attribute = set_archive_attribute + + +class CorsRule(_serialization.Model): + """CORS is an HTTP feature that enables a web application running under one domain to access + resources in another domain. Web browsers implement a security restriction known as same-origin + policy that prevents a web page from calling APIs in a different domain; CORS provides a secure + way to allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :ivar allowed_origins: The origin domains that are permitted to make a request against the + storage service via CORS. The origin domain is the domain from which the request originates. + Note that the origin must be an exact case-sensitive match with the origin that the user agent + sends to the service. You can also use the wildcard character '*' to allow all origin domains + to make requests via CORS. Required. + :vartype allowed_origins: str + :ivar allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a + CORS request. (comma separated). Required. + :vartype allowed_methods: str + :ivar allowed_headers: The request headers that the origin domain may specify on the CORS + request. Required. + :vartype allowed_headers: str + :ivar exposed_headers: The response headers that may be sent in the response to the CORS + request and exposed by the browser to the request issuer. Required. + :vartype exposed_headers: str + :ivar max_age_in_seconds: The maximum amount of time that a browser should cache the preflight + OPTIONS request. Required.
+ :vartype max_age_in_seconds: int + """ + + _validation = { + "allowed_origins": {"required": True}, + "allowed_methods": {"required": True}, + "allowed_headers": {"required": True}, + "exposed_headers": {"required": True}, + "max_age_in_seconds": {"required": True, "minimum": 0}, + } + + _attribute_map = { + "allowed_origins": {"key": "AllowedOrigins", "type": "str"}, + "allowed_methods": {"key": "AllowedMethods", "type": "str"}, + "allowed_headers": {"key": "AllowedHeaders", "type": "str"}, + "exposed_headers": {"key": "ExposedHeaders", "type": "str"}, + "max_age_in_seconds": {"key": "MaxAgeInSeconds", "type": "int"}, + } + + def __init__( + self, + *, + allowed_origins: str, + allowed_methods: str, + allowed_headers: str, + exposed_headers: str, + max_age_in_seconds: int, + **kwargs: Any + ) -> None: + """ + :keyword allowed_origins: The origin domains that are permitted to make a request against the + storage service via CORS. The origin domain is the domain from which the request originates. + Note that the origin must be an exact case-sensitive match with the origin that the user agent + sends to the service. You can also use the wildcard character '*' to allow all origin domains + to make requests via CORS. Required. + :paramtype allowed_origins: str + :keyword allowed_methods: The methods (HTTP request verbs) that the origin domain may use for a + CORS request. (comma separated). Required. + :paramtype allowed_methods: str + :keyword allowed_headers: The request headers that the origin domain may specify on the CORS + request. Required. + :paramtype allowed_headers: str + :keyword exposed_headers: The response headers that may be sent in the response to the CORS + request and exposed by the browser to the request issuer. Required. + :paramtype exposed_headers: str + :keyword max_age_in_seconds: The maximum amount of time that a browser should cache the preflight + OPTIONS request. Required. + :paramtype max_age_in_seconds: int + """ + super().__init__(**kwargs) + self.allowed_origins = allowed_origins + self.allowed_methods = allowed_methods + self.allowed_headers = allowed_headers + self.exposed_headers = exposed_headers + self.max_age_in_seconds = max_age_in_seconds + + +class DestinationLeaseAccessConditions(_serialization.Model): + """Parameter group. + + :ivar destination_lease_id: Required if the destination file has an active infinite lease. The + lease ID specified for this header must match the lease ID of the destination file. If the + request does not include the lease ID or it is not valid, the operation fails with status code + 412 (Precondition Failed). If this header is specified and the destination file does not + currently have an active lease, the operation will also fail with status code 412 (Precondition + Failed). + :vartype destination_lease_id: str + """ + + _attribute_map = { + "destination_lease_id": {"key": "destinationLeaseId", "type": "str"}, + } + + def __init__(self, *, destination_lease_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword destination_lease_id: Required if the destination file has an active infinite lease. + The lease ID specified for this header must match the lease ID of the destination file. If the + request does not include the lease ID or it is not valid, the operation fails with status code + 412 (Precondition Failed). If this header is specified and the destination file does not + currently have an active lease, the operation will also fail with status code 412 (Precondition + Failed).
+ :paramtype destination_lease_id: str + """ + super().__init__(**kwargs) + self.destination_lease_id = destination_lease_id + + +class DirectoryItem(_serialization.Model): + """A listed directory item. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: ~azure.storage.fileshare.models.StringEncoded + :ivar file_id: + :vartype file_id: str + :ivar properties: File properties. + :vartype properties: ~azure.storage.fileshare.models.FileProperty + :ivar attributes: + :vartype attributes: str + :ivar permission_key: + :vartype permission_key: str + """ + + _validation = { + "name": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "StringEncoded"}, + "file_id": {"key": "FileId", "type": "str"}, + "properties": {"key": "Properties", "type": "FileProperty"}, + "attributes": {"key": "Attributes", "type": "str"}, + "permission_key": {"key": "PermissionKey", "type": "str"}, + } + _xml_map = {"name": "Directory"} + + def __init__( + self, + *, + name: "_models.StringEncoded", + file_id: Optional[str] = None, + properties: Optional["_models.FileProperty"] = None, + attributes: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: Required. + :paramtype name: ~azure.storage.fileshare.models.StringEncoded + :keyword file_id: + :paramtype file_id: str + :keyword properties: File properties. + :paramtype properties: ~azure.storage.fileshare.models.FileProperty + :keyword attributes: + :paramtype attributes: str + :keyword permission_key: + :paramtype permission_key: str + """ + super().__init__(**kwargs) + self.name = name + self.file_id = file_id + self.properties = properties + self.attributes = attributes + self.permission_key = permission_key + + +class FileHTTPHeaders(_serialization.Model): + """Parameter group. + + :ivar file_content_type: Sets the MIME content type of the file. The default type is + 'application/octet-stream'. + :vartype file_content_type: str + :ivar file_content_encoding: Specifies which content encodings have been applied to the file. + :vartype file_content_encoding: str + :ivar file_content_language: Specifies the natural languages used by this resource. + :vartype file_content_language: str + :ivar file_cache_control: Sets the file's cache control. The File service stores this value but + does not use or modify it. + :vartype file_cache_control: str + :ivar file_content_md5: Sets the file's MD5 hash. + :vartype file_content_md5: bytes + :ivar file_content_disposition: Sets the file's Content-Disposition header. + :vartype file_content_disposition: str + """ + + _attribute_map = { + "file_content_type": {"key": "fileContentType", "type": "str"}, + "file_content_encoding": {"key": "fileContentEncoding", "type": "str"}, + "file_content_language": {"key": "fileContentLanguage", "type": "str"}, + "file_cache_control": {"key": "fileCacheControl", "type": "str"}, + "file_content_md5": {"key": "fileContentMD5", "type": "bytearray"}, + "file_content_disposition": {"key": "fileContentDisposition", "type": "str"}, + } + + def __init__( + self, + *, + file_content_type: Optional[str] = None, + file_content_encoding: Optional[str] = None, + file_content_language: Optional[str] = None, + file_cache_control: Optional[str] = None, + file_content_md5: Optional[bytes] = None, + file_content_disposition: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword file_content_type: Sets the MIME content type of the file. 
The default type is + 'application/octet-stream'. + :paramtype file_content_type: str + :keyword file_content_encoding: Specifies which content encodings have been applied to the + file. + :paramtype file_content_encoding: str + :keyword file_content_language: Specifies the natural languages used by this resource. + :paramtype file_content_language: str + :keyword file_cache_control: Sets the file's cache control. The File service stores this value + but does not use or modify it. + :paramtype file_cache_control: str + :keyword file_content_md5: Sets the file's MD5 hash. + :paramtype file_content_md5: bytes + :keyword file_content_disposition: Sets the file's Content-Disposition header. + :paramtype file_content_disposition: str + """ + super().__init__(**kwargs) + self.file_content_type = file_content_type + self.file_content_encoding = file_content_encoding + self.file_content_language = file_content_language + self.file_cache_control = file_cache_control + self.file_content_md5 = file_content_md5 + self.file_content_disposition = file_content_disposition + + +class FileItem(_serialization.Model): + """A listed file item. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: ~azure.storage.fileshare.models.StringEncoded + :ivar file_id: + :vartype file_id: str + :ivar properties: File properties. Required. + :vartype properties: ~azure.storage.fileshare.models.FileProperty + :ivar attributes: + :vartype attributes: str + :ivar permission_key: + :vartype permission_key: str + """ + + _validation = { + "name": {"required": True}, + "properties": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "StringEncoded"}, + "file_id": {"key": "FileId", "type": "str"}, + "properties": {"key": "Properties", "type": "FileProperty"}, + "attributes": {"key": "Attributes", "type": "str"}, + "permission_key": {"key": "PermissionKey", "type": "str"}, + } + _xml_map = {"name": "File"} + + def __init__( + self, + *, + name: "_models.StringEncoded", + properties: "_models.FileProperty", + file_id: Optional[str] = None, + attributes: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: Required. + :paramtype name: ~azure.storage.fileshare.models.StringEncoded + :keyword file_id: + :paramtype file_id: str + :keyword properties: File properties. Required. + :paramtype properties: ~azure.storage.fileshare.models.FileProperty + :keyword attributes: + :paramtype attributes: str + :keyword permission_key: + :paramtype permission_key: str + """ + super().__init__(**kwargs) + self.name = name + self.file_id = file_id + self.properties = properties + self.attributes = attributes + self.permission_key = permission_key + + +class FileProperty(_serialization.Model): + """File properties. + + All required parameters must be populated in order to send to Azure. + + :ivar content_length: Content length of the file. This value may not be up-to-date since an SMB + client may have modified the file locally. The value of Content-Length may not reflect that + fact until the handle is closed or the op-lock is broken. To retrieve current property values, + call Get File Properties. Required. 
+ :vartype content_length: int + :ivar creation_time: + :vartype creation_time: ~datetime.datetime + :ivar last_access_time: + :vartype last_access_time: ~datetime.datetime + :ivar last_write_time: + :vartype last_write_time: ~datetime.datetime + :ivar change_time: + :vartype change_time: ~datetime.datetime + :ivar last_modified: + :vartype last_modified: ~datetime.datetime + :ivar etag: + :vartype etag: str + """ + + _validation = { + "content_length": {"required": True}, + } + + _attribute_map = { + "content_length": {"key": "Content-Length", "type": "int"}, + "creation_time": {"key": "CreationTime", "type": "iso-8601"}, + "last_access_time": {"key": "LastAccessTime", "type": "iso-8601"}, + "last_write_time": {"key": "LastWriteTime", "type": "iso-8601"}, + "change_time": {"key": "ChangeTime", "type": "iso-8601"}, + "last_modified": {"key": "Last-Modified", "type": "rfc-1123"}, + "etag": {"key": "Etag", "type": "str"}, + } + + def __init__( + self, + *, + content_length: int, + creation_time: Optional[datetime.datetime] = None, + last_access_time: Optional[datetime.datetime] = None, + last_write_time: Optional[datetime.datetime] = None, + change_time: Optional[datetime.datetime] = None, + last_modified: Optional[datetime.datetime] = None, + etag: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword content_length: Content length of the file. This value may not be up-to-date since an + SMB client may have modified the file locally. The value of Content-Length may not reflect that + fact until the handle is closed or the op-lock is broken. To retrieve current property values, + call Get File Properties. Required. + :paramtype content_length: int + :keyword creation_time: + :paramtype creation_time: ~datetime.datetime + :keyword last_access_time: + :paramtype last_access_time: ~datetime.datetime + :keyword last_write_time: + :paramtype last_write_time: ~datetime.datetime + :keyword change_time: + :paramtype change_time: ~datetime.datetime + :keyword last_modified: + :paramtype last_modified: ~datetime.datetime + :keyword etag: + :paramtype etag: str + """ + super().__init__(**kwargs) + self.content_length = content_length + self.creation_time = creation_time + self.last_access_time = last_access_time + self.last_write_time = last_write_time + self.change_time = change_time + self.last_modified = last_modified + self.etag = etag + + +class FileRange(_serialization.Model): + """An Azure Storage file range. + + All required parameters must be populated in order to send to Azure. + + :ivar start: Start of the range. Required. + :vartype start: int + :ivar end: End of the range. Required. + :vartype end: int + """ + + _validation = { + "start": {"required": True}, + "end": {"required": True}, + } + + _attribute_map = { + "start": {"key": "Start", "type": "int"}, + "end": {"key": "End", "type": "int"}, + } + _xml_map = {"name": "Range"} + + def __init__(self, *, start: int, end: int, **kwargs: Any) -> None: + """ + :keyword start: Start of the range. Required. + :paramtype start: int + :keyword end: End of the range. Required. + :paramtype end: int + """ + super().__init__(**kwargs) + self.start = start + self.end = end + + +class FilesAndDirectoriesListSegment(_serialization.Model): + """Abstract for entries that can be listed from Directory. + + All required parameters must be populated in order to send to Azure. + + :ivar directory_items: Required. + :vartype directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :ivar file_items: Required. 
+ :vartype file_items: list[~azure.storage.fileshare.models.FileItem] + """ + + _validation = { + "directory_items": {"required": True}, + "file_items": {"required": True}, + } + + _attribute_map = { + "directory_items": {"key": "DirectoryItems", "type": "[DirectoryItem]", "xml": {"itemsName": "Directory"}}, + "file_items": {"key": "FileItems", "type": "[FileItem]", "xml": {"itemsName": "File"}}, + } + _xml_map = {"name": "Entries"} + + def __init__( + self, *, directory_items: List["_models.DirectoryItem"], file_items: List["_models.FileItem"], **kwargs: Any + ) -> None: + """ + :keyword directory_items: Required. + :paramtype directory_items: list[~azure.storage.fileshare.models.DirectoryItem] + :keyword file_items: Required. + :paramtype file_items: list[~azure.storage.fileshare.models.FileItem] + """ + super().__init__(**kwargs) + self.directory_items = directory_items + self.file_items = file_items + + +class HandleItem(_serialization.Model): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :ivar handle_id: XSMB service handle ID. Required. + :vartype handle_id: str + :ivar path: Required. + :vartype path: ~azure.storage.fileshare.models.StringEncoded + :ivar file_id: FileId uniquely identifies the file or directory. Required. + :vartype file_id: str + :ivar parent_id: ParentId uniquely identifies the parent directory of the object. + :vartype parent_id: str + :ivar session_id: SMB session ID in context of which the file handle was opened. Required. + :vartype session_id: str + :ivar client_ip: Client IP that opened the handle. Required. + :vartype client_ip: str + :ivar open_time: Time when the session that previously opened the handle has last been + reconnected. (UTC). Required. + :vartype open_time: ~datetime.datetime + :ivar last_reconnect_time: Time handle was last connected to (UTC). + :vartype last_reconnect_time: ~datetime.datetime + """ + + _validation = { + "handle_id": {"required": True}, + "path": {"required": True}, + "file_id": {"required": True}, + "session_id": {"required": True}, + "client_ip": {"required": True}, + "open_time": {"required": True}, + } + + _attribute_map = { + "handle_id": {"key": "HandleId", "type": "str"}, + "path": {"key": "Path", "type": "StringEncoded"}, + "file_id": {"key": "FileId", "type": "str"}, + "parent_id": {"key": "ParentId", "type": "str"}, + "session_id": {"key": "SessionId", "type": "str"}, + "client_ip": {"key": "ClientIp", "type": "str"}, + "open_time": {"key": "OpenTime", "type": "rfc-1123"}, + "last_reconnect_time": {"key": "LastReconnectTime", "type": "rfc-1123"}, + } + _xml_map = {"name": "Handle"} + + def __init__( + self, + *, + handle_id: str, + path: "_models.StringEncoded", + file_id: str, + session_id: str, + client_ip: str, + open_time: datetime.datetime, + parent_id: Optional[str] = None, + last_reconnect_time: Optional[datetime.datetime] = None, + **kwargs: Any + ) -> None: + """ + :keyword handle_id: XSMB service handle ID. Required. + :paramtype handle_id: str + :keyword path: Required. + :paramtype path: ~azure.storage.fileshare.models.StringEncoded + :keyword file_id: FileId uniquely identifies the file or directory. Required. + :paramtype file_id: str + :keyword parent_id: ParentId uniquely identifies the parent directory of the object. + :paramtype parent_id: str + :keyword session_id: SMB session ID in context of which the file handle was opened. Required. + :paramtype session_id: str + :keyword client_ip: Client IP that opened the handle. 
Required. + :paramtype client_ip: str + :keyword open_time: Time when the session that previously opened the handle has last been + reconnected. (UTC). Required. + :paramtype open_time: ~datetime.datetime + :keyword last_reconnect_time: Time handle was last connected to (UTC). + :paramtype last_reconnect_time: ~datetime.datetime + """ + super().__init__(**kwargs) + self.handle_id = handle_id + self.path = path + self.file_id = file_id + self.parent_id = parent_id + self.session_id = session_id + self.client_ip = client_ip + self.open_time = open_time + self.last_reconnect_time = last_reconnect_time + + +class LeaseAccessConditions(_serialization.Model): + """Parameter group. + + :ivar lease_id: If specified, the operation only succeeds if the resource's lease is active and + matches this ID. + :vartype lease_id: str + """ + + _attribute_map = { + "lease_id": {"key": "leaseId", "type": "str"}, + } + + def __init__(self, *, lease_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword lease_id: If specified, the operation only succeeds if the resource's lease is active + and matches this ID. + :paramtype lease_id: str + """ + super().__init__(**kwargs) + self.lease_id = lease_id + + +class ListFilesAndDirectoriesSegmentResponse(_serialization.Model): # pylint: disable=too-many-instance-attributes + """An enumeration of directories and files. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar share_name: Required. + :vartype share_name: str + :ivar share_snapshot: + :vartype share_snapshot: str + :ivar encoded: + :vartype encoded: bool + :ivar directory_path: Required. + :vartype directory_path: str + :ivar prefix: Required. + :vartype prefix: ~azure.storage.fileshare.models.StringEncoded + :ivar marker: + :vartype marker: str + :ivar max_results: + :vartype max_results: int + :ivar segment: Abstract for entries that can be listed from Directory. Required. + :vartype segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :ivar next_marker: Required. 
+ :vartype next_marker: str + :ivar directory_id: + :vartype directory_id: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "share_name": {"required": True}, + "directory_path": {"required": True}, + "prefix": {"required": True}, + "segment": {"required": True}, + "next_marker": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "share_name": {"key": "ShareName", "type": "str", "xml": {"attr": True}}, + "share_snapshot": {"key": "ShareSnapshot", "type": "str", "xml": {"attr": True}}, + "encoded": {"key": "Encoded", "type": "bool", "xml": {"attr": True}}, + "directory_path": {"key": "DirectoryPath", "type": "str", "xml": {"attr": True}}, + "prefix": {"key": "Prefix", "type": "StringEncoded"}, + "marker": {"key": "Marker", "type": "str"}, + "max_results": {"key": "MaxResults", "type": "int"}, + "segment": {"key": "Segment", "type": "FilesAndDirectoriesListSegment"}, + "next_marker": {"key": "NextMarker", "type": "str"}, + "directory_id": {"key": "DirectoryId", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + share_name: str, + directory_path: str, + prefix: "_models.StringEncoded", + segment: "_models.FilesAndDirectoriesListSegment", + next_marker: str, + share_snapshot: Optional[str] = None, + encoded: Optional[bool] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + directory_id: Optional[str] = None, + **kwargs: Any + ) -> None: + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword share_name: Required. + :paramtype share_name: str + :keyword share_snapshot: + :paramtype share_snapshot: str + :keyword encoded: + :paramtype encoded: bool + :keyword directory_path: Required. + :paramtype directory_path: str + :keyword prefix: Required. + :paramtype prefix: ~azure.storage.fileshare.models.StringEncoded + :keyword marker: + :paramtype marker: str + :keyword max_results: + :paramtype max_results: int + :keyword segment: Abstract for entries that can be listed from Directory. Required. + :paramtype segment: ~azure.storage.fileshare.models.FilesAndDirectoriesListSegment + :keyword next_marker: Required. + :paramtype next_marker: str + :keyword directory_id: + :paramtype directory_id: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.share_name = share_name + self.share_snapshot = share_snapshot + self.encoded = encoded + self.directory_path = directory_path + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.segment = segment + self.next_marker = next_marker + self.directory_id = directory_id + + +class ListHandlesResponse(_serialization.Model): + """An enumeration of handles. + + All required parameters must be populated in order to send to Azure. + + :ivar handle_list: + :vartype handle_list: list[~azure.storage.fileshare.models.HandleItem] + :ivar next_marker: Required. 
+ :vartype next_marker: str + """ + + _validation = { + "next_marker": {"required": True}, + } + + _attribute_map = { + "handle_list": { + "key": "HandleList", + "type": "[HandleItem]", + "xml": {"name": "Entries", "wrapped": True, "itemsName": "Handle"}, + }, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, *, next_marker: str, handle_list: Optional[List["_models.HandleItem"]] = None, **kwargs: Any + ) -> None: + """ + :keyword handle_list: + :paramtype handle_list: list[~azure.storage.fileshare.models.HandleItem] + :keyword next_marker: Required. + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.handle_list = handle_list + self.next_marker = next_marker + + +class ListSharesResponse(_serialization.Model): + """An enumeration of shares. + + All required parameters must be populated in order to send to Azure. + + :ivar service_endpoint: Required. + :vartype service_endpoint: str + :ivar prefix: + :vartype prefix: str + :ivar marker: + :vartype marker: str + :ivar max_results: + :vartype max_results: int + :ivar share_items: + :vartype share_items: list[~azure.storage.fileshare.models.ShareItemInternal] + :ivar next_marker: Required. + :vartype next_marker: str + """ + + _validation = { + "service_endpoint": {"required": True}, + "next_marker": {"required": True}, + } + + _attribute_map = { + "service_endpoint": {"key": "ServiceEndpoint", "type": "str", "xml": {"attr": True}}, + "prefix": {"key": "Prefix", "type": "str"}, + "marker": {"key": "Marker", "type": "str"}, + "max_results": {"key": "MaxResults", "type": "int"}, + "share_items": { + "key": "ShareItems", + "type": "[ShareItemInternal]", + "xml": {"name": "Shares", "wrapped": True, "itemsName": "Share"}, + }, + "next_marker": {"key": "NextMarker", "type": "str"}, + } + _xml_map = {"name": "EnumerationResults"} + + def __init__( + self, + *, + service_endpoint: str, + next_marker: str, + prefix: Optional[str] = None, + marker: Optional[str] = None, + max_results: Optional[int] = None, + share_items: Optional[List["_models.ShareItemInternal"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword service_endpoint: Required. + :paramtype service_endpoint: str + :keyword prefix: + :paramtype prefix: str + :keyword marker: + :paramtype marker: str + :keyword max_results: + :paramtype max_results: int + :keyword share_items: + :paramtype share_items: list[~azure.storage.fileshare.models.ShareItemInternal] + :keyword next_marker: Required. + :paramtype next_marker: str + """ + super().__init__(**kwargs) + self.service_endpoint = service_endpoint + self.prefix = prefix + self.marker = marker + self.max_results = max_results + self.share_items = share_items + self.next_marker = next_marker + + +class Metrics(_serialization.Model): + """Storage Analytics metrics for file service. + + All required parameters must be populated in order to send to Azure. + + :ivar version: The version of Storage Analytics to configure. Required. + :vartype version: str + :ivar enabled: Indicates whether metrics are enabled for the File service. Required. + :vartype enabled: bool + :ivar include_apis: Indicates whether metrics should generate summary statistics for called API + operations. + :vartype include_apis: bool + :ivar retention_policy: The retention policy. 
+ :vartype retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + + _validation = { + "version": {"required": True}, + "enabled": {"required": True}, + } + + _attribute_map = { + "version": {"key": "Version", "type": "str"}, + "enabled": {"key": "Enabled", "type": "bool"}, + "include_apis": {"key": "IncludeAPIs", "type": "bool"}, + "retention_policy": {"key": "RetentionPolicy", "type": "RetentionPolicy"}, + } + + def __init__( + self, + *, + version: str, + enabled: bool, + include_apis: Optional[bool] = None, + retention_policy: Optional["_models.RetentionPolicy"] = None, + **kwargs: Any + ) -> None: + """ + :keyword version: The version of Storage Analytics to configure. Required. + :paramtype version: str + :keyword enabled: Indicates whether metrics are enabled for the File service. Required. + :paramtype enabled: bool + :keyword include_apis: Indicates whether metrics should generate summary statistics for called + API operations. + :paramtype include_apis: bool + :keyword retention_policy: The retention policy. + :paramtype retention_policy: ~azure.storage.fileshare.models.RetentionPolicy + """ + super().__init__(**kwargs) + self.version = version + self.enabled = enabled + self.include_apis = include_apis + self.retention_policy = retention_policy + + +class RetentionPolicy(_serialization.Model): + """The retention policy. + + All required parameters must be populated in order to send to Azure. + + :ivar enabled: Indicates whether a retention policy is enabled for the File service. If false, + metrics data is retained, and the user is responsible for deleting it. Required. + :vartype enabled: bool + :ivar days: Indicates the number of days that metrics data should be retained. All data older + than this value will be deleted. Metrics data is deleted on a best-effort basis after the + retention period expires. + :vartype days: int + """ + + _validation = { + "enabled": {"required": True}, + "days": {"maximum": 365, "minimum": 1}, + } + + _attribute_map = { + "enabled": {"key": "Enabled", "type": "bool"}, + "days": {"key": "Days", "type": "int"}, + } + + def __init__(self, *, enabled: bool, days: Optional[int] = None, **kwargs: Any) -> None: + """ + :keyword enabled: Indicates whether a retention policy is enabled for the File service. If + false, metrics data is retained, and the user is responsible for deleting it. Required. + :paramtype enabled: bool + :keyword days: Indicates the number of days that metrics data should be retained. All data + older than this value will be deleted. Metrics data is deleted on a best-effort basis after the + retention period expires. + :paramtype days: int + """ + super().__init__(**kwargs) + self.enabled = enabled + self.days = days + + +class ShareFileRangeList(_serialization.Model): + """The list of file ranges. 
+ + :ivar ranges: + :vartype ranges: list[~azure.storage.fileshare.models.FileRange] + :ivar clear_ranges: + :vartype clear_ranges: list[~azure.storage.fileshare.models.ClearRange] + """ + + _attribute_map = { + "ranges": {"key": "Ranges", "type": "[FileRange]", "xml": {"itemsName": "Range"}}, + "clear_ranges": {"key": "ClearRanges", "type": "[ClearRange]", "xml": {"itemsName": "ClearRange"}}, + } + + def __init__( + self, + *, + ranges: Optional[List["_models.FileRange"]] = None, + clear_ranges: Optional[List["_models.ClearRange"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword ranges: + :paramtype ranges: list[~azure.storage.fileshare.models.FileRange] + :keyword clear_ranges: + :paramtype clear_ranges: list[~azure.storage.fileshare.models.ClearRange] + """ + super().__init__(**kwargs) + self.ranges = ranges + self.clear_ranges = clear_ranges + + +class ShareItemInternal(_serialization.Model): + """A listed Azure Storage share item. + + All required parameters must be populated in order to send to Azure. + + :ivar name: Required. + :vartype name: str + :ivar snapshot: + :vartype snapshot: str + :ivar deleted: + :vartype deleted: bool + :ivar version: + :vartype version: str + :ivar properties: Properties of a share. Required. + :vartype properties: ~azure.storage.fileshare.models.SharePropertiesInternal + :ivar metadata: Dictionary of :code:``. + :vartype metadata: dict[str, str] + """ + + _validation = { + "name": {"required": True}, + "properties": {"required": True}, + } + + _attribute_map = { + "name": {"key": "Name", "type": "str"}, + "snapshot": {"key": "Snapshot", "type": "str"}, + "deleted": {"key": "Deleted", "type": "bool"}, + "version": {"key": "Version", "type": "str"}, + "properties": {"key": "Properties", "type": "SharePropertiesInternal"}, + "metadata": {"key": "Metadata", "type": "{str}"}, + } + _xml_map = {"name": "Share"} + + def __init__( + self, + *, + name: str, + properties: "_models.SharePropertiesInternal", + snapshot: Optional[str] = None, + deleted: Optional[bool] = None, + version: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + **kwargs: Any + ) -> None: + """ + :keyword name: Required. + :paramtype name: str + :keyword snapshot: + :paramtype snapshot: str + :keyword deleted: + :paramtype deleted: bool + :keyword version: + :paramtype version: str + :keyword properties: Properties of a share. Required. + :paramtype properties: ~azure.storage.fileshare.models.SharePropertiesInternal + :keyword metadata: Dictionary of :code:``. + :paramtype metadata: dict[str, str] + """ + super().__init__(**kwargs) + self.name = name + self.snapshot = snapshot + self.deleted = deleted + self.version = version + self.properties = properties + self.metadata = metadata + + +class SharePermission(_serialization.Model): + """A permission (a security descriptor) at the share level. + + All required parameters must be populated in order to send to Azure. + + :ivar permission: The permission in the Security Descriptor Definition Language (SDDL). + Required. + :vartype permission: str + """ + + _validation = { + "permission": {"required": True}, + } + + _attribute_map = { + "permission": {"key": "permission", "type": "str"}, + } + + def __init__(self, *, permission: str, **kwargs: Any) -> None: + """ + :keyword permission: The permission in the Security Descriptor Definition Language (SDDL). + Required. 
+ :paramtype permission: str + """ + super().__init__(**kwargs) + self.permission = permission + + +class SharePropertiesInternal(_serialization.Model): # pylint: disable=too-many-instance-attributes + """Properties of a share. + + All required parameters must be populated in order to send to Azure. + + :ivar last_modified: Required. + :vartype last_modified: ~datetime.datetime + :ivar etag: Required. + :vartype etag: str + :ivar quota: Required. + :vartype quota: int + :ivar provisioned_iops: + :vartype provisioned_iops: int + :ivar provisioned_ingress_m_bps: + :vartype provisioned_ingress_m_bps: int + :ivar provisioned_egress_m_bps: + :vartype provisioned_egress_m_bps: int + :ivar provisioned_bandwidth_mi_bps: + :vartype provisioned_bandwidth_mi_bps: int + :ivar next_allowed_quota_downgrade_time: + :vartype next_allowed_quota_downgrade_time: ~datetime.datetime + :ivar deleted_time: + :vartype deleted_time: ~datetime.datetime + :ivar remaining_retention_days: + :vartype remaining_retention_days: int + :ivar access_tier: + :vartype access_tier: str + :ivar access_tier_change_time: + :vartype access_tier_change_time: ~datetime.datetime + :ivar access_tier_transition_state: + :vartype access_tier_transition_state: str + :ivar lease_status: The current lease status of the share. Known values are: "locked" and + "unlocked". + :vartype lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :ivar lease_state: Lease state of the share. Known values are: "available", "leased", + "expired", "breaking", and "broken". + :vartype lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :ivar lease_duration: When a share is leased, specifies whether the lease is of infinite or + fixed duration. Known values are: "infinite" and "fixed". + :vartype lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType + :ivar enabled_protocols: + :vartype enabled_protocols: str + :ivar root_squash: Known values are: "NoRootSquash", "RootSquash", and "AllSquash". 
+ :vartype root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + """ + + _validation = { + "last_modified": {"required": True}, + "etag": {"required": True}, + "quota": {"required": True}, + } + + _attribute_map = { + "last_modified": {"key": "Last-Modified", "type": "rfc-1123"}, + "etag": {"key": "Etag", "type": "str"}, + "quota": {"key": "Quota", "type": "int"}, + "provisioned_iops": {"key": "ProvisionedIops", "type": "int"}, + "provisioned_ingress_m_bps": {"key": "ProvisionedIngressMBps", "type": "int"}, + "provisioned_egress_m_bps": {"key": "ProvisionedEgressMBps", "type": "int"}, + "provisioned_bandwidth_mi_bps": {"key": "ProvisionedBandwidthMiBps", "type": "int"}, + "next_allowed_quota_downgrade_time": {"key": "NextAllowedQuotaDowngradeTime", "type": "rfc-1123"}, + "deleted_time": {"key": "DeletedTime", "type": "rfc-1123"}, + "remaining_retention_days": {"key": "RemainingRetentionDays", "type": "int"}, + "access_tier": {"key": "AccessTier", "type": "str"}, + "access_tier_change_time": {"key": "AccessTierChangeTime", "type": "rfc-1123"}, + "access_tier_transition_state": {"key": "AccessTierTransitionState", "type": "str"}, + "lease_status": {"key": "LeaseStatus", "type": "str"}, + "lease_state": {"key": "LeaseState", "type": "str"}, + "lease_duration": {"key": "LeaseDuration", "type": "str"}, + "enabled_protocols": {"key": "EnabledProtocols", "type": "str"}, + "root_squash": {"key": "RootSquash", "type": "str"}, + } + + def __init__( + self, + *, + last_modified: datetime.datetime, + etag: str, + quota: int, + provisioned_iops: Optional[int] = None, + provisioned_ingress_m_bps: Optional[int] = None, + provisioned_egress_m_bps: Optional[int] = None, + provisioned_bandwidth_mi_bps: Optional[int] = None, + next_allowed_quota_downgrade_time: Optional[datetime.datetime] = None, + deleted_time: Optional[datetime.datetime] = None, + remaining_retention_days: Optional[int] = None, + access_tier: Optional[str] = None, + access_tier_change_time: Optional[datetime.datetime] = None, + access_tier_transition_state: Optional[str] = None, + lease_status: Optional[Union[str, "_models.LeaseStatusType"]] = None, + lease_state: Optional[Union[str, "_models.LeaseStateType"]] = None, + lease_duration: Optional[Union[str, "_models.LeaseDurationType"]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, "_models.ShareRootSquash"]] = None, + **kwargs: Any + ) -> None: + """ + :keyword last_modified: Required. + :paramtype last_modified: ~datetime.datetime + :keyword etag: Required. + :paramtype etag: str + :keyword quota: Required. + :paramtype quota: int + :keyword provisioned_iops: + :paramtype provisioned_iops: int + :keyword provisioned_ingress_m_bps: + :paramtype provisioned_ingress_m_bps: int + :keyword provisioned_egress_m_bps: + :paramtype provisioned_egress_m_bps: int + :keyword provisioned_bandwidth_mi_bps: + :paramtype provisioned_bandwidth_mi_bps: int + :keyword next_allowed_quota_downgrade_time: + :paramtype next_allowed_quota_downgrade_time: ~datetime.datetime + :keyword deleted_time: + :paramtype deleted_time: ~datetime.datetime + :keyword remaining_retention_days: + :paramtype remaining_retention_days: int + :keyword access_tier: + :paramtype access_tier: str + :keyword access_tier_change_time: + :paramtype access_tier_change_time: ~datetime.datetime + :keyword access_tier_transition_state: + :paramtype access_tier_transition_state: str + :keyword lease_status: The current lease status of the share. Known values are: "locked" and + "unlocked". 
+ :paramtype lease_status: str or ~azure.storage.fileshare.models.LeaseStatusType + :keyword lease_state: Lease state of the share. Known values are: "available", "leased", + "expired", "breaking", and "broken". + :paramtype lease_state: str or ~azure.storage.fileshare.models.LeaseStateType + :keyword lease_duration: When a share is leased, specifies whether the lease is of infinite or + fixed duration. Known values are: "infinite" and "fixed". + :paramtype lease_duration: str or ~azure.storage.fileshare.models.LeaseDurationType + :keyword enabled_protocols: + :paramtype enabled_protocols: str + :keyword root_squash: Known values are: "NoRootSquash", "RootSquash", and "AllSquash". + :paramtype root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + """ + super().__init__(**kwargs) + self.last_modified = last_modified + self.etag = etag + self.quota = quota + self.provisioned_iops = provisioned_iops + self.provisioned_ingress_m_bps = provisioned_ingress_m_bps + self.provisioned_egress_m_bps = provisioned_egress_m_bps + self.provisioned_bandwidth_mi_bps = provisioned_bandwidth_mi_bps + self.next_allowed_quota_downgrade_time = next_allowed_quota_downgrade_time + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days + self.access_tier = access_tier + self.access_tier_change_time = access_tier_change_time + self.access_tier_transition_state = access_tier_transition_state + self.lease_status = lease_status + self.lease_state = lease_state + self.lease_duration = lease_duration + self.enabled_protocols = enabled_protocols + self.root_squash = root_squash + + +class ShareProtocolSettings(_serialization.Model): + """Protocol settings. + + :ivar smb: Settings for SMB protocol. + :vartype smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + + _attribute_map = { + "smb": {"key": "Smb", "type": "ShareSmbSettings"}, + } + _xml_map = {"name": "ProtocolSettings"} + + def __init__(self, *, smb: Optional["_models.ShareSmbSettings"] = None, **kwargs: Any) -> None: + """ + :keyword smb: Settings for SMB protocol. + :paramtype smb: ~azure.storage.fileshare.models.ShareSmbSettings + """ + super().__init__(**kwargs) + self.smb = smb + + +class ShareSmbSettings(_serialization.Model): + """Settings for SMB protocol. + + :ivar multichannel: Settings for SMB Multichannel. + :vartype multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + + _attribute_map = { + "multichannel": {"key": "Multichannel", "type": "SmbMultichannel"}, + } + _xml_map = {"name": "SMB"} + + def __init__(self, *, multichannel: Optional["_models.SmbMultichannel"] = None, **kwargs: Any) -> None: + """ + :keyword multichannel: Settings for SMB Multichannel. + :paramtype multichannel: ~azure.storage.fileshare.models.SmbMultichannel + """ + super().__init__(**kwargs) + self.multichannel = multichannel + + +class ShareStats(_serialization.Model): + """Stats for the share. + + All required parameters must be populated in order to send to Azure. + + :ivar share_usage_bytes: The approximate size of the data stored in bytes. Note that this value + may not include all recently created or recently resized files. Required. + :vartype share_usage_bytes: int + """ + + _validation = { + "share_usage_bytes": {"required": True}, + } + + _attribute_map = { + "share_usage_bytes": {"key": "ShareUsageBytes", "type": "int"}, + } + + def __init__(self, *, share_usage_bytes: int, **kwargs: Any) -> None: + """ + :keyword share_usage_bytes: The approximate size of the data stored in bytes. 
Note that this + value may not include all recently created or recently resized files. Required. + :paramtype share_usage_bytes: int + """ + super().__init__(**kwargs) + self.share_usage_bytes = share_usage_bytes + + +class SignedIdentifier(_serialization.Model): + """Signed identifier. + + All required parameters must be populated in order to send to Azure. + + :ivar id: A unique id. Required. + :vartype id: str + :ivar access_policy: The access policy. + :vartype access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + + _validation = { + "id": {"required": True}, + } + + _attribute_map = { + "id": {"key": "Id", "type": "str"}, + "access_policy": {"key": "AccessPolicy", "type": "AccessPolicy"}, + } + + def __init__( + self, + *, + id: str, # pylint: disable=redefined-builtin + access_policy: Optional["_models.AccessPolicy"] = None, + **kwargs: Any + ) -> None: + """ + :keyword id: A unique id. Required. + :paramtype id: str + :keyword access_policy: The access policy. + :paramtype access_policy: ~azure.storage.fileshare.models.AccessPolicy + """ + super().__init__(**kwargs) + self.id = id + self.access_policy = access_policy + + +class SmbMultichannel(_serialization.Model): + """Settings for SMB multichannel. + + :ivar enabled: If SMB multichannel is enabled. + :vartype enabled: bool + """ + + _attribute_map = { + "enabled": {"key": "Enabled", "type": "bool"}, + } + _xml_map = {"name": "Multichannel"} + + def __init__(self, *, enabled: Optional[bool] = None, **kwargs: Any) -> None: + """ + :keyword enabled: If SMB multichannel is enabled. + :paramtype enabled: bool + """ + super().__init__(**kwargs) + self.enabled = enabled + + +class SourceLeaseAccessConditions(_serialization.Model): + """Parameter group. + + :ivar source_lease_id: Required if the source file has an active infinite lease. + :vartype source_lease_id: str + """ + + _attribute_map = { + "source_lease_id": {"key": "sourceLeaseId", "type": "str"}, + } + + def __init__(self, *, source_lease_id: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword source_lease_id: Required if the source file has an active infinite lease. + :paramtype source_lease_id: str + """ + super().__init__(**kwargs) + self.source_lease_id = source_lease_id + + +class SourceModifiedAccessConditions(_serialization.Model): + """Parameter group. + + :ivar source_if_match_crc64: Specify the crc64 value to operate only on range with a matching + crc64 checksum. + :vartype source_if_match_crc64: bytes + :ivar source_if_none_match_crc64: Specify the crc64 value to operate only on range without a + matching crc64 checksum. + :vartype source_if_none_match_crc64: bytes + """ + + _attribute_map = { + "source_if_match_crc64": {"key": "sourceIfMatchCrc64", "type": "bytearray"}, + "source_if_none_match_crc64": {"key": "sourceIfNoneMatchCrc64", "type": "bytearray"}, + } + + def __init__( + self, + *, + source_if_match_crc64: Optional[bytes] = None, + source_if_none_match_crc64: Optional[bytes] = None, + **kwargs: Any + ) -> None: + """ + :keyword source_if_match_crc64: Specify the crc64 value to operate only on range with a + matching crc64 checksum. + :paramtype source_if_match_crc64: bytes + :keyword source_if_none_match_crc64: Specify the crc64 value to operate only on range without a + matching crc64 checksum. 
+ :paramtype source_if_none_match_crc64: bytes + """ + super().__init__(**kwargs) + self.source_if_match_crc64 = source_if_match_crc64 + self.source_if_none_match_crc64 = source_if_none_match_crc64 + + +class StorageError(_serialization.Model): + """StorageError. + + :ivar message: + :vartype message: str + """ + + _attribute_map = { + "message": {"key": "Message", "type": "str"}, + } + + def __init__(self, *, message: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword message: + :paramtype message: str + """ + super().__init__(**kwargs) + self.message = message + + +class StorageServiceProperties(_serialization.Model): + """Storage service properties. + + :ivar hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + files. + :vartype hour_metrics: ~azure.storage.fileshare.models.Metrics + :ivar minute_metrics: A summary of request statistics grouped by API in minute aggregates for + files. + :vartype minute_metrics: ~azure.storage.fileshare.models.Metrics + :ivar cors: The set of CORS rules. + :vartype cors: list[~azure.storage.fileshare.models.CorsRule] + :ivar protocol: Protocol settings. + :vartype protocol: ~azure.storage.fileshare.models.ShareProtocolSettings + """ + + _attribute_map = { + "hour_metrics": {"key": "HourMetrics", "type": "Metrics"}, + "minute_metrics": {"key": "MinuteMetrics", "type": "Metrics"}, + "cors": {"key": "Cors", "type": "[CorsRule]", "xml": {"wrapped": True}}, + "protocol": {"key": "Protocol", "type": "ShareProtocolSettings"}, + } + + def __init__( + self, + *, + hour_metrics: Optional["_models.Metrics"] = None, + minute_metrics: Optional["_models.Metrics"] = None, + cors: Optional[List["_models.CorsRule"]] = None, + protocol: Optional["_models.ShareProtocolSettings"] = None, + **kwargs: Any + ) -> None: + """ + :keyword hour_metrics: A summary of request statistics grouped by API in hourly aggregates for + files. + :paramtype hour_metrics: ~azure.storage.fileshare.models.Metrics + :keyword minute_metrics: A summary of request statistics grouped by API in minute aggregates + for files. + :paramtype minute_metrics: ~azure.storage.fileshare.models.Metrics + :keyword cors: The set of CORS rules. + :paramtype cors: list[~azure.storage.fileshare.models.CorsRule] + :keyword protocol: Protocol settings. + :paramtype protocol: ~azure.storage.fileshare.models.ShareProtocolSettings + """ + super().__init__(**kwargs) + self.hour_metrics = hour_metrics + self.minute_metrics = minute_metrics + self.cors = cors + self.protocol = protocol + + +class StringEncoded(_serialization.Model): + """StringEncoded. 
+ + :ivar encoded: + :vartype encoded: bool + :ivar content: + :vartype content: str + """ + + _attribute_map = { + "encoded": {"key": "Encoded", "type": "bool", "xml": {"name": "Encoded", "attr": True}}, + "content": {"key": "content", "type": "str", "xml": {"text": True}}, + } + + def __init__(self, *, encoded: Optional[bool] = None, content: Optional[str] = None, **kwargs: Any) -> None: + """ + :keyword encoded: + :paramtype encoded: bool + :keyword content: + :paramtype content: str + """ + super().__init__(**kwargs) + self.encoded = encoded + self.content = content diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/models/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/__init__.py new file mode 100644 index 00000000000..5e0376c1c30 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/__init__.py @@ -0,0 +1,25 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- + +from ._service_operations import ServiceOperations +from ._share_operations import ShareOperations +from ._directory_operations import DirectoryOperations +from ._file_operations import FileOperations + +from ._patch import __all__ as _patch_all +from ._patch import * # pylint: disable=unused-wildcard-import +from ._patch import patch_sdk as _patch_sdk + +__all__ = [ + "ServiceOperations", + "ShareOperations", + "DirectoryOperations", + "FileOperations", +] +__all__.extend([p for p in _patch_all if p not in __all__]) +_patch_sdk() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_directory_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_directory_operations.py new file mode 100644 index 00000000000..7173b6c9e3f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_directory_operations.py @@ -0,0 +1,1535 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_properties_request( + url: str, + *, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + allow_trailing_dot: Optional[bool] = None, + 
file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + url: str, + *, + timeout: Optional[int] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_properties_request( + url: str, + *, + timeout: Optional[int] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + 
file_change_time: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_metadata_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, 
**path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_files_and_directories_segment_request( + url: str, + *, + prefix: Optional[str] = None, + sharesnapshot: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None, + include_extended_info: Optional[bool] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if prefix is not None: + _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if include_extended_info is not None: + _headers["x-ms-file-extended-info"] = _SERIALIZER.header("include_extended_info", include_extended_info, "bool") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] 
= _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_handles_request( + url: str, + *, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + if recursive is not None: + _headers["x-ms-recursive"] = _SERIALIZER.header("recursive", recursive, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_force_close_handles_request( + url: str, + *, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, 
"int", minimum=0) + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-handle-id"] = _SERIALIZER.header("handle_id", handle_id, "str") + if recursive is not None: + _headers["x-ms-recursive"] = _SERIALIZER.header("recursive", recursive, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_rename_request( + url: str, + *, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + source_lease_id: Optional[str] = None, + destination_lease_id: Optional[str] = None, + file_attributes: Optional[str] = None, + file_creation_time: Optional[str] = None, + file_last_write_time: Optional[str] = None, + file_change_time: Optional[str] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["x-ms-file-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str") + if replace_if_exists is not None: + _headers["x-ms-file-rename-replace-if-exists"] = _SERIALIZER.header( + "replace_if_exists", replace_if_exists, "bool" + ) + if ignore_read_only is not None: + _headers["x-ms-file-rename-ignore-readonly"] = _SERIALIZER.header("ignore_read_only", ignore_read_only, "bool") + if source_lease_id is not None: + _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str") + if destination_lease_id is not None: + _headers["x-ms-destination-lease-id"] = _SERIALIZER.header("destination_lease_id", destination_lease_id, "str") + if file_attributes 
is not None: + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if allow_source_trailing_dot is not None: + _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header( + "allow_source_trailing_dot", allow_source_trailing_dot, "bool" + ) + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +class DirectoryOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.AzureFileStorage`'s + :attr:`directory` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + **kwargs: Any + ) -> None: + """Creates a new directory under the specified share or parent directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". 
+ :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", 
response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def get_properties( # pylint: disable=inconsistent-return-statements + self, sharesnapshot: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Returns all system properties for the specified directory, and can also be used to check the + existence of a directory. The data returned does not include the files in the directory or any + subdirectories. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Removes the specified empty directory. 
Note that the directory must be empty before it can be + deleted. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def set_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + **kwargs: Any + ) -> None: + """Sets properties on the directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". 
+ :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-version"] = self._deserialize("str", 
response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def set_metadata( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any + ) -> None: + """Updates user defined metadata for the specified directory. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def list_files_and_directories_segment( + self, + prefix: Optional[str] = None, + sharesnapshot: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + include: Optional[List[Union[str, _models.ListFilesIncludeType]]] = None, + include_extended_info: Optional[bool] = None, + **kwargs: Any + ) -> _models.ListFilesAndDirectoriesSegmentResponse: + """Returns a list of files or directories under the specified share or directory. It lists the + contents only for a single level of the directory hierarchy. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. Default value is None. + :type prefix: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. 
The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.fileshare.models.ListFilesIncludeType] + :param include_extended_info: Include extended information. Default value is None. + :type include_extended_info: bool + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListFilesAndDirectoriesSegmentResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListFilesAndDirectoriesSegmentResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + cls: ClsType[_models.ListFilesAndDirectoriesSegmentResponse] = kwargs.pop("cls", None) + + request = build_list_files_and_directories_segment_request( + url=self._config.url, + prefix=prefix, + sharesnapshot=sharesnapshot, + marker=marker, + maxresults=maxresults, + timeout=timeout, + include=include, + include_extended_info=include_extended_info, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.list_files_and_directories_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", 
response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListFilesAndDirectoriesSegmentResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_files_and_directories_segment.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> _models.ListHandlesResponse: + """Lists handles for directory. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. Default value is None. + :type recursive: bool + :keyword comp: comp. Default value is "listhandles". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None) + + request = build_list_handles_request( + url=self._config.url, + marker=marker, + maxresults=maxresults, + timeout=timeout, + sharesnapshot=sharesnapshot, + recursive=recursive, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.list_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListHandlesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_handles.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def force_close_handles( # pylint: disable=inconsistent-return-statements + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + recursive: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Closes all handles open for given directory. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. Required. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. 
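+
+        Because the continuation marker comes back only in the ``x-ms-marker``
+        response header, a ``cls`` callback is needed to drive the loop. A
+        hedged sketch (``dir_ops`` is an assumed ``DirectoryOperations``
+        instance; ``grab_headers`` is an illustrative helper):
+
+        .. code-block:: python
+
+            def grab_headers(response, deserialized, headers):
+                # cls receives (pipeline_response, deserialized, response_headers)
+                return headers
+
+            marker = None
+            while True:
+                hdrs = dir_ops.force_close_handles(
+                    "*", marker=marker, recursive=True, cls=grab_headers
+                )
+                marker = hdrs.get("x-ms-marker")
+                if not marker:
+                    break
+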
+ :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param recursive: Specifies operation should apply to the directory specified in the URI, its + files, its subdirectories and their files. Default value is None. + :type recursive: bool + :keyword comp: comp. Default value is "forceclosehandles". Note that overriding this default + value may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_force_close_handles_request( + url=self._config.url, + handle_id=handle_id, + timeout=timeout, + marker=marker, + sharesnapshot=sharesnapshot, + recursive=recursive, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.force_close_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker")) + response_headers["x-ms-number-of-handles-closed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-closed") + ) + response_headers["x-ms-number-of-handles-failed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-failed") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {"url": "{url}/{shareName}/{directory}"} + + @distributed_trace + def rename( # pylint: disable=inconsistent-return-statements + self, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None, + 
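+ # The lease and SMB-info parameter groups are flattened into their
+ # individual x-ms-source-lease-id / x-ms-destination-lease-id / SMB-info
+ # headers in the body before build_rename_request is called.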
destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + **kwargs: Any + ) -> None: + """Renames a directory. + + :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in + length. Required. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param replace_if_exists: Optional. A boolean value for if the destination file already exists, + whether this request will overwrite the file or not. If true, the rename will succeed and will + overwrite the destination file. If not provided or if false and the destination file does + exist, the request will not overwrite the destination file. If provided and the destination + file doesn’t exist, the rename will succeed. Note: This value does not override the + x-ms-file-copy-ignore-read-only header value. Default value is None. + :type replace_if_exists: bool + :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly + attribute on a preexisting destination file should be respected. If true, the rename will + succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will + cause the rename to fail. Default value is None. + :type ignore_read_only: bool + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param source_lease_access_conditions: Parameter group. Default value is None. + :type source_lease_access_conditions: + ~azure.storage.fileshare.models.SourceLeaseAccessConditions + :param destination_lease_access_conditions: Parameter group. Default value is None. + :type destination_lease_access_conditions: + ~azure.storage.fileshare.models.DestinationLeaseAccessConditions + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :keyword restype: restype. Default value is "directory". Note that overriding this default + value may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "rename". Note that overriding this default value may + result in unsupported behavior. 
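+
+        A hedged sketch of a rename that passes the parameter groups described
+        above (``dir_ops`` is an assumed ``DirectoryOperations`` instance and
+        the lease ID is illustrative):
+
+        .. code-block:: python
+
+            smb_info = _models.CopyFileSmbInfo(file_attributes="Directory")
+            dest_lease = _models.DestinationLeaseAccessConditions(
+                destination_lease_id="00000000-0000-0000-0000-000000000000"
+            )
+            dir_ops.rename(
+                rename_source="https://acct.file.core.windows.net/share/old-dir",
+                replace_if_exists=False,
+                destination_lease_access_conditions=dest_lease,
+                copy_file_smb_info=smb_info,
+            )
+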
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["directory"] = kwargs.pop("restype", _params.pop("restype", "directory")) + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_lease_id = None + _destination_lease_id = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + if source_lease_access_conditions is not None: + _source_lease_id = source_lease_access_conditions.source_lease_id + if destination_lease_access_conditions is not None: + _destination_lease_id = destination_lease_access_conditions.destination_lease_id + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + + request = build_rename_request( + url=self._config.url, + rename_source=rename_source, + timeout=timeout, + replace_if_exists=replace_if_exists, + ignore_read_only=ignore_read_only, + source_lease_id=_source_lease_id, + destination_lease_id=_destination_lease_id, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + file_permission=file_permission, + file_permission_key=file_permission_key, + metadata=metadata, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.rename.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + 
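+ # The remaining x-ms-file-* headers echo the SMB attributes, timestamps
+ # and file/parent IDs the service assigned at the new location.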
response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {"url": "{url}/{shareName}/{directory}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_file_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_file_operations.py new file mode 100644 index 00000000000..9bd4ef1f3e4 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_file_operations.py @@ -0,0 +1,3245 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, IO, Iterator, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. 
import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + file_content_length: int, + timeout: Optional[int] = None, + file_content_type: Optional[str] = None, + file_content_encoding: Optional[str] = None, + file_content_language: Optional[str] = None, + file_cache_control: Optional[str] = None, + file_content_md5: Optional[bytes] = None, + file_content_disposition: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["x-ms-content-length"] = _SERIALIZER.header("file_content_length", file_content_length, "int") + _headers["x-ms-type"] = _SERIALIZER.header("file_type_constant", file_type_constant, "str") + if file_content_type is not None: + _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str") + if file_content_encoding is not None: + _headers["x-ms-content-encoding"] = _SERIALIZER.header("file_content_encoding", file_content_encoding, "str") + if file_content_language is not None: + _headers["x-ms-content-language"] = _SERIALIZER.header("file_content_language", file_content_language, "str") + if file_cache_control is not None: + _headers["x-ms-cache-control"] = _SERIALIZER.header("file_cache_control", file_cache_control, "str") + if file_content_md5 is not None: + _headers["x-ms-content-md5"] = _SERIALIZER.header("file_content_md5", file_content_md5, "bytearray") + if file_content_disposition is not None: + _headers["x-ms-content-disposition"] = _SERIALIZER.header( + "file_content_disposition", file_content_disposition, "str" + ) + if metadata is not None: + 
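+ # The "{str}" type tells the serializer this is a dict[str, str]; on the
+ # wire each pair is carried under the x-ms-meta- header prefix.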
_headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_download_request( + url: str, + *, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if range_get_content_md5 is not None: + _headers["x-ms-range-get-content-md5"] = _SERIALIZER.header( + "range_get_content_md5", range_get_content_md5, "bool" + ) + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_properties_request( + url: str, + *, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + 
**kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="HEAD", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_http_headers_request( + url: str, + *, + timeout: Optional[int] = None, + file_content_length: Optional[int] = None, + file_content_type: Optional[str] = None, + file_content_encoding: Optional[str] = None, + file_content_language: Optional[str] = None, + file_cache_control: Optional[str] = None, + file_content_md5: Optional[bytes] = None, + file_content_disposition: Optional[str] = None, + 
file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_content_length is not None: + _headers["x-ms-content-length"] = _SERIALIZER.header("file_content_length", file_content_length, "int") + if file_content_type is not None: + _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str") + if file_content_encoding is not None: + _headers["x-ms-content-encoding"] = _SERIALIZER.header("file_content_encoding", file_content_encoding, "str") + if file_content_language is not None: + _headers["x-ms-content-language"] = _SERIALIZER.header("file_content_language", file_content_language, "str") + if file_cache_control is not None: + _headers["x-ms-cache-control"] = _SERIALIZER.header("file_cache_control", file_cache_control, "str") + if file_content_md5 is not None: + _headers["x-ms-content-md5"] = _SERIALIZER.header("file_content_md5", file_content_md5, "bytearray") + if file_content_disposition is not None: + _headers["x-ms-content-disposition"] = _SERIALIZER.header( + "file_content_disposition", file_content_disposition, "str" + ) + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = 
_SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_metadata_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_acquire_lease_request( + url: str, + *, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + 
if duration is not None: + _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_release_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_change_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = 
kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_break_lease_request( + url: str, + *, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = 
_SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_upload_range_request( + url: str, + *, + range: str, + content_length: int, + timeout: Optional[int] = None, + file_range_write: Union[str, _models.FileRangeWriteType] = "update", + content_md5: Optional[bytes] = None, + lease_id: Optional[str] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + content: Optional[IO] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + _headers["x-ms-write"] = _SERIALIZER.header("file_range_write", file_range_write, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if content_md5 is not None: + _headers["Content-MD5"] = _SERIALIZER.header("content_md5", content_md5, "bytearray") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if file_last_written_mode is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header( + "file_last_written_mode", file_last_written_mode, "str" + ) + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_upload_range_from_url_request( + url: str, + *, + range: str, + copy_source: str, + content_length: int, + timeout: Optional[int] = None, + source_range: Optional[str] = None, + source_content_crc64: Optional[bytes] = None, + source_if_match_crc64: Optional[bytes] = None, + source_if_none_match_crc64: Optional[bytes] = None, + lease_id: Optional[str] = None, + 
copy_source_authorization: Optional[str] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + file_range_write_from_url: Literal["update"] = kwargs.pop( + "file_range_write_from_url", _headers.pop("x-ms-write", "update") + ) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + if source_range is not None: + _headers["x-ms-source-range"] = _SERIALIZER.header("source_range", source_range, "str") + _headers["x-ms-write"] = _SERIALIZER.header("file_range_write_from_url", file_range_write_from_url, "str") + _headers["Content-Length"] = _SERIALIZER.header("content_length", content_length, "int") + if source_content_crc64 is not None: + _headers["x-ms-source-content-crc64"] = _SERIALIZER.header( + "source_content_crc64", source_content_crc64, "bytearray" + ) + if source_if_match_crc64 is not None: + _headers["x-ms-source-if-match-crc64"] = _SERIALIZER.header( + "source_if_match_crc64", source_if_match_crc64, "bytearray" + ) + if source_if_none_match_crc64 is not None: + _headers["x-ms-source-if-none-match-crc64"] = _SERIALIZER.header( + "source_if_none_match_crc64", source_if_none_match_crc64, "bytearray" + ) + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if copy_source_authorization is not None: + _headers["x-ms-copy-source-authorization"] = _SERIALIZER.header( + "copy_source_authorization", copy_source_authorization, "str" + ) + if file_last_written_mode is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header( + "file_last_written_mode", file_last_written_mode, "str" + ) + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if allow_source_trailing_dot is not None: + _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header( + "allow_source_trailing_dot", allow_source_trailing_dot, "bool" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_range_list_request( + url: str, + *, + sharesnapshot: Optional[str] = None, + prevsharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + 
file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if prevsharesnapshot is not None: + _params["prevsharesnapshot"] = _SERIALIZER.query("prevsharesnapshot", prevsharesnapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if range is not None: + _headers["x-ms-range"] = _SERIALIZER.header("range", range, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_start_copy_request( + url: str, + *, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_permission_copy_mode: Optional[Union[str, _models.PermissionCopyModeType]] = None, + ignore_read_only: Optional[bool] = None, + file_attributes: Optional[str] = None, + file_creation_time: Optional[str] = None, + file_last_write_time: Optional[str] = None, + file_change_time: Optional[str] = None, + set_archive_attribute: Optional[bool] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = 
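# --- Illustrative aside (not part of the diff) ---------------------------------
# Hedged sketch: listing the valid (written) ranges of a file. The public
# ShareFileClient.get_ranges call is served by a GET with comp=rangelist,
# matching build_get_range_list_request above. `file_client` is the placeholder
# client constructed in the first aside.
ranges = file_client.get_ranges()  # optionally get_ranges(offset=..., length=...)
for r in ranges:
    # Each entry is a dict of inclusive byte offsets.
    print(f"valid range: bytes {r['start']}-{r['end']}")
# --------------------------------------------------------------------------------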
_SERIALIZER.header("version", version, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-copy-source"] = _SERIALIZER.header("copy_source", copy_source, "str") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + if file_permission_copy_mode is not None: + _headers["x-ms-file-permission-copy-mode"] = _SERIALIZER.header( + "file_permission_copy_mode", file_permission_copy_mode, "str" + ) + if ignore_read_only is not None: + _headers["x-ms-file-copy-ignore-readonly"] = _SERIALIZER.header("ignore_read_only", ignore_read_only, "bool") + if file_attributes is not None: + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if set_archive_attribute is not None: + _headers["x-ms-file-copy-set-archive"] = _SERIALIZER.header( + "set_archive_attribute", set_archive_attribute, "bool" + ) + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if allow_source_trailing_dot is not None: + _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header( + "allow_source_trailing_dot", allow_source_trailing_dot, "bool" + ) + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_abort_copy_request( + url: str, + *, + copy_id: str, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy")) + copy_action_abort_constant: Literal["abort"] = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["copyid"] = _SERIALIZER.query("copy_id", copy_id, "str") + if timeout is 
not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-copy-action"] = _SERIALIZER.header("copy_action_abort_constant", copy_action_abort_constant, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_handles_request( + url: str, + *, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_force_close_handles_request( + url: str, + *, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + version: Literal["2022-11-02"] = 
kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-handle-id"] = _SERIALIZER.header("handle_id", handle_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_rename_request( + url: str, + *, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + source_lease_id: Optional[str] = None, + destination_lease_id: Optional[str] = None, + file_attributes: Optional[str] = None, + file_creation_time: Optional[str] = None, + file_last_write_time: Optional[str] = None, + file_change_time: Optional[str] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + file_content_type: Optional[str] = None, + allow_trailing_dot: Optional[bool] = None, + allow_source_trailing_dot: Optional[bool] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}/{directory}/{fileName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["x-ms-file-rename-source"] = _SERIALIZER.header("rename_source", rename_source, "str") + if replace_if_exists is not None: + _headers["x-ms-file-rename-replace-if-exists"] = _SERIALIZER.header( + "replace_if_exists", replace_if_exists, "bool" + ) + if ignore_read_only is not None: + _headers["x-ms-file-rename-ignore-readonly"] = 
_SERIALIZER.header("ignore_read_only", ignore_read_only, "bool") + if source_lease_id is not None: + _headers["x-ms-source-lease-id"] = _SERIALIZER.header("source_lease_id", source_lease_id, "str") + if destination_lease_id is not None: + _headers["x-ms-destination-lease-id"] = _SERIALIZER.header("destination_lease_id", destination_lease_id, "str") + if file_attributes is not None: + _headers["x-ms-file-attributes"] = _SERIALIZER.header("file_attributes", file_attributes, "str") + if file_creation_time is not None: + _headers["x-ms-file-creation-time"] = _SERIALIZER.header("file_creation_time", file_creation_time, "str") + if file_last_write_time is not None: + _headers["x-ms-file-last-write-time"] = _SERIALIZER.header("file_last_write_time", file_last_write_time, "str") + if file_change_time is not None: + _headers["x-ms-file-change-time"] = _SERIALIZER.header("file_change_time", file_change_time, "str") + if file_permission is not None: + _headers["x-ms-file-permission"] = _SERIALIZER.header("file_permission", file_permission, "str") + if file_permission_key is not None: + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if file_content_type is not None: + _headers["x-ms-content-type"] = _SERIALIZER.header("file_content_type", file_content_type, "str") + if allow_trailing_dot is not None: + _headers["x-ms-allow-trailing-dot"] = _SERIALIZER.header("allow_trailing_dot", allow_trailing_dot, "bool") + if allow_source_trailing_dot is not None: + _headers["x-ms-source-allow-trailing-dot"] = _SERIALIZER.header( + "allow_source_trailing_dot", allow_source_trailing_dot, "bool" + ) + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +class FileOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.AzureFileStorage`'s + :attr:`file` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + file_content_length: int, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Creates a new file or replaces a file. Note it only initializes the file with no content. + + :param file_content_length: Specifies the maximum size for the file, up to 4 TB. Required. 
+ :type file_content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword file_type_constant: Dummy constant parameter, file type can only be file. Default + value is "file". Note that overriding this default value may result in unsupported behavior. 
+ :paramtype file_type_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + file_type_constant: Literal["file"] = kwargs.pop("file_type_constant", _headers.pop("x-ms-type", "file")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_cache_control = file_http_headers.file_cache_control + _file_content_disposition = file_http_headers.file_content_disposition + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_type = file_http_headers.file_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_create_request( + url=self._config.url, + file_content_length=file_content_length, + timeout=timeout, + file_content_type=_file_content_type, + file_content_encoding=_file_content_encoding, + file_content_language=_file_content_language, + file_cache_control=_file_cache_control, + file_content_md5=_file_content_md5, + file_content_disposition=_file_content_disposition, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + file_type_constant=file_type_constant, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", 
response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def download( + self, + timeout: Optional[int] = None, + range: Optional[str] = None, + range_get_content_md5: Optional[bool] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> Iterator[bytes]: + """Reads or downloads a file from the system, including its metadata and properties. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param range: Return file data only from the specified byte range. Default value is None. + :type range: str + :param range_get_content_md5: When this header is set to true and specified together with the + Range header, the service returns the MD5 hash for the range, as long as the range is less than + or equal to 4 MB in size. Default value is None. + :type range_get_content_md5: bool + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: Iterator of the response bytes or the result of cls(response) + :rtype: Iterator[bytes] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Iterator[bytes]] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_download_request( + url=self._config.url, + timeout=timeout, + range=range, + range_get_content_md5=range_get_content_md5, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.download.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=True, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + if response.status_code == 200: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + 
"str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-md5") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize( + "str", response.headers.get("x-ms-file-parent-id") + ) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + deserialized = response.stream_download(self._client._pipeline) + + if response.status_code == 206: + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["Content-Range"] = self._deserialize("str", response.headers.get("Content-Range")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize( + "str", response.headers.get("Content-Disposition") + ) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Accept-Ranges"] = self._deserialize("str", response.headers.get("Accept-Ranges")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = self._deserialize( + "str", response.headers.get("x-ms-copy-progress") + ) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-content-md5"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-md5") + ) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize( + "str", response.headers.get("x-ms-file-parent-id") + ) + response_headers["x-ms-lease-duration"] = self._deserialize( + "str", response.headers.get("x-ms-lease-duration") + ) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + deserialized = response.stream_download(self._client._pipeline) + + if cls: + return cls(pipeline_response, deserialized, response_headers) # type: ignore + + return deserialized # type: ignore + + download.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def get_properties( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata, standard HTTP properties, and system properties for the + file. It does not return the content of the file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["x-ms-type"] = self._deserialize("str", response.headers.get("x-ms-type")) + response_headers["Content-Length"] = self._deserialize("int", response.headers.get("Content-Length")) + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["Content-Encoding"] = self._deserialize("str", response.headers.get("Content-Encoding")) + response_headers["Cache-Control"] = self._deserialize("str", response.headers.get("Cache-Control")) + response_headers["Content-Disposition"] = self._deserialize("str", response.headers.get("Content-Disposition")) + response_headers["Content-Language"] = self._deserialize("str", response.headers.get("Content-Language")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-completion-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-copy-completion-time") + ) + response_headers["x-ms-copy-status-description"] = self._deserialize( + "str", response.headers.get("x-ms-copy-status-description") + ) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-progress"] = 
self._deserialize("str", response.headers.get("x-ms-copy-progress")) + response_headers["x-ms-copy-source"] = self._deserialize("str", response.headers.get("x-ms-copy-source")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + response_headers["x-ms-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-server-encrypted") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """removes the file from the storage account. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_delete_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def set_http_headers( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + file_content_length: Optional[int] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + file_attributes: str = "none", + file_creation_time: str = "now", + file_last_write_time: str = "now", + file_change_time: Optional[str] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets HTTP headers on the file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_content_length: Resizes a file to the specified size. If the specified byte value + is less than the current size of the file, then all ranges above the specified byte value are + cleared. Default value is None. + :type file_content_length: int + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. 
Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param file_attributes: If specified, the provided file attributes shall be set. Default value: + ‘Archive’ for file and ‘Directory’ for directory. ‘None’ can also be specified as default. + Default value is "none". + :type file_attributes: str + :param file_creation_time: Creation time for the file/directory. Default value: Now. Default + value is "now". + :type file_creation_time: str + :param file_last_write_time: Last write time for the file/directory. Default value: Now. + Default value is "now". + :type file_last_write_time: str + :param file_change_time: Change time for the file/directory. Default value: Now. Default value + is None. + :type file_change_time: str + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_content_type = None + _file_content_encoding = None + _file_content_language = None + _file_cache_control = None + _file_content_md5 = None + _file_content_disposition = None + _lease_id = None + if file_http_headers is not None: + _file_cache_control = file_http_headers.file_cache_control + _file_content_disposition = file_http_headers.file_content_disposition + _file_content_encoding = file_http_headers.file_content_encoding + _file_content_language = file_http_headers.file_content_language + _file_content_md5 = file_http_headers.file_content_md5 + _file_content_type = file_http_headers.file_content_type + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_http_headers_request( + url=self._config.url, + timeout=timeout, + file_content_length=file_content_length, + file_content_type=_file_content_type, + file_content_encoding=_file_content_encoding, + file_content_language=_file_content_language, + file_cache_control=_file_cache_control, + file_content_md5=_file_content_md5, + file_content_disposition=_file_content_disposition, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_change_time=file_change_time, + 
lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.set_http_headers.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_http_headers.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Updates user-defined metadata for the specified file. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. 
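# --- Illustrative aside (not part of the diff) ---------------------------------
# Hedged sketch: the comp=properties PUT above both resizes a file (via
# x-ms-content-length) and sets its standard HTTP headers. Through the public
# client (placeholder setup in the first aside) that is exposed as:
from azure.storage.fileshare import ContentSettings

file_client.set_http_headers(
    content_settings=ContentSettings(content_type="application/octet-stream")
)
# --------------------------------------------------------------------------------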
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. 
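# --- Illustrative aside (not part of the diff) ---------------------------------
# Hedged sketch: the comp=metadata PUT above replaces the full x-ms-meta-* set
# in one call, so callers pass the complete desired mapping rather than a
# delta. With the placeholder client from the first aside:
file_client.set_file_metadata({"project": "aosm", "stage": "test"})
# --------------------------------------------------------------------------------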
Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def release_lease( # pylint: disable=inconsistent-return-statements + self, lease_id: str, timeout: Optional[int] = None, request_id_parameter: Optional[str] = None, **kwargs: Any + ) -> None: + """[Update] The Lease File 
operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def change_lease( # pylint: 
disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + proposed_lease_id=proposed_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = 
self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """[Update] The Lease File operation establishes and manages a lock on a file for write and delete + operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. 
+ :paramtype action: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + action=action, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def upload_range( # pylint: disable=inconsistent-return-statements + self, + range: str, + content_length: int, + timeout: Optional[int] = None, + file_range_write: Union[str, _models.FileRangeWriteType] = "update", + content_md5: Optional[bytes] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + optionalbody: Optional[IO] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file. + + :param range: Specifies the range of bytes to be written. Both the start and end of the range + must be specified. For an update operation, the range can be up to 4 MB in size. 
For a clear + operation, the range can be up to the value of the file's full size. The File service accepts + only a single byte range for the Range and 'x-ms-range' headers, and the byte range must be + specified in the following format: bytes=startByte-endByte. Required. + :type range: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param file_range_write: Specify one of the following options: - Update: Writes the bytes + specified by the request body into the specified range. The Range and Content-Length headers + must match to perform the update. - Clear: Clears the specified range and releases the space + used in storage for that range. To clear a range, set the Content-Length header to zero, and + set the Range header to a value that indicates the range to clear, up to maximum file size. + Known values are: "update" and "clear". Default value is "update". + :type file_range_write: str or ~azure.storage.fileshare.models.FileRangeWriteType + :param content_md5: An MD5 hash of the content. This hash is used to verify the integrity of + the data during transport. When the Content-MD5 header is specified, the File service compares + the hash of the content that has arrived with the header value that was sent. If the two hashes + do not match, the operation will fail with error code 400 (Bad Request). Default value is None. + :type content_md5: bytes + :param file_last_written_mode: If the file last write time should be preserved or overwritten. + Known values are: "Now" and "Preserve". Default value is None. + :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :param optionalbody: Initial data. Default value is None. + :type optionalbody: IO + :keyword comp: comp. Default value is "range". Note that overriding this default value may + result in unsupported behavior. 
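+
+        The two ``x-ms-write`` modes can be exercised through the public wrapper as
+        in this sketch (illustrative only; ``file_client`` is assumed to exist):
+
+        .. code-block:: python
+
+            file_client.upload_range(b"\x00" * 512, offset=0, length=512)  # update
+            file_client.clear_range(offset=0, length=512)  # clear: frees the space
+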
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/octet-stream")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + _content = optionalbody + + request = build_upload_range_request( + url=self._config.url, + range=range, + content_length=content_length, + timeout=timeout, + file_range_write=file_range_write, + content_md5=content_md5, + lease_id=_lease_id, + file_last_written_mode=file_last_written_mode, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.upload_range.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["Content-MD5"] = self._deserialize("bytearray", response.headers.get("Content-MD5")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def upload_range_from_url( # pylint: disable=inconsistent-return-statements + self, + range: str, + copy_source: str, + content_length: int, + timeout: Optional[int] = None, + source_range: Optional[str] = None, + source_content_crc64: Optional[bytes] = None, + copy_source_authorization: Optional[str] = None, + file_last_written_mode: Optional[Union[str, _models.FileLastWrittenMode]] = None, + source_modified_access_conditions: 
Optional[_models.SourceModifiedAccessConditions] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Upload a range of bytes to a file where the contents are read from a URL. + + :param range: Writes data to the specified byte range in the file. Required. + :type range: str + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. Required. + :type copy_source: str + :param content_length: Specifies the number of bytes being transmitted in the request body. + When the x-ms-write header is set to clear, the value of this header must be set to zero. + Required. + :type content_length: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param source_range: Bytes of source data in the specified range. Default value is None. + :type source_range: str + :param source_content_crc64: Specify the crc64 calculated for the range of bytes that must be + read from the copy source. Default value is None. + :type source_content_crc64: bytes + :param copy_source_authorization: Only Bearer type is supported. Credentials should be a valid + OAuth access token to copy source. Default value is None. + :type copy_source_authorization: str + :param file_last_written_mode: If the file last write time should be preserved or overwritten. + Known values are: "Now" and "Preserve". Default value is None. + :type file_last_written_mode: str or ~azure.storage.fileshare.models.FileLastWrittenMode + :param source_modified_access_conditions: Parameter group. Default value is None. + :type source_modified_access_conditions: + ~azure.storage.fileshare.models.SourceModifiedAccessConditions + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "range". Note that overriding this default value may + result in unsupported behavior. 
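+
+        A sketch of the equivalent public-wrapper call (illustrative; the source URL
+        and SAS token are placeholders, and ``file_client`` is assumed to exist):
+
+        .. code-block:: python
+
+            file_client.upload_range_from_url(
+                source_url="https://account.file.core.windows.net/share/src.bin?<sas>",
+                offset=0,
+                length=512,
+                source_offset=0,
+            )
+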
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["range"] = kwargs.pop("comp", _params.pop("comp", "range")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_if_match_crc64 = None + _source_if_none_match_crc64 = None + _lease_id = None + if source_modified_access_conditions is not None: + _source_if_match_crc64 = source_modified_access_conditions.source_if_match_crc64 + _source_if_none_match_crc64 = source_modified_access_conditions.source_if_none_match_crc64 + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_upload_range_from_url_request( + url=self._config.url, + range=range, + copy_source=copy_source, + content_length=content_length, + timeout=timeout, + source_range=source_range, + source_content_crc64=source_content_crc64, + source_if_match_crc64=_source_if_match_crc64, + source_if_none_match_crc64=_source_if_none_match_crc64, + lease_id=_lease_id, + copy_source_authorization=copy_source_authorization, + file_last_written_mode=file_last_written_mode, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + comp=comp, + file_range_write_from_url=self._config.file_range_write_from_url, + version=self._config.version, + template_url=self.upload_range_from_url.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-content-crc64"] = self._deserialize( + "bytearray", response.headers.get("x-ms-content-crc64") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + upload_range_from_url.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def 
get_range_list( + self, + sharesnapshot: Optional[str] = None, + prevsharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + range: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.ShareFileRangeList: + """Returns the list of valid ranges for a file. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param prevsharesnapshot: The previous snapshot parameter is an opaque DateTime value that, + when present, specifies the previous snapshot. Default value is None. + :type prevsharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param range: Specifies the range of bytes over which to list ranges, inclusively. Default + value is None. + :type range: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "rangelist". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareFileRangeList or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareFileRangeList + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rangelist"] = kwargs.pop("comp", _params.pop("comp", "rangelist")) + cls: ClsType[_models.ShareFileRangeList] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_range_list_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + prevsharesnapshot=prevsharesnapshot, + timeout=timeout, + range=range, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.get_range_list.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["x-ms-content-length"] = self._deserialize("int", response.headers.get("x-ms-content-length")) + 
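+        # A typical caller reaches this operation through the public wrapper, e.g.
+        # (illustrative sketch; ``file_client`` is a placeholder):
+        #
+        #     valid = file_client.get_ranges()  # -> [{'start': 0, 'end': 511}, ...]
+        #     ranges, cleared = file_client.get_ranges_diff(
+        #         previous_share_snapshot="<snapshot-datetime>"
+        #     )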
response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ShareFileRangeList", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_range_list.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def start_copy( # pylint: disable=inconsistent-return-statements + self, + copy_source: str, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Copies a blob or file to a destination file within the storage account. + + :param copy_source: Specifies the URL of the source file or blob, up to 2 KB in length. To copy + a file to another file within the same storage account, you may use Shared Key to authenticate + the source file. If you are copying a file from another storage account, or if you are copying + a blob from the same storage account or another storage account, then you must authenticate the + source file or blob using a shared access signature. If the source is a public blob, no + authentication is required to perform the copy operation. A file in a share snapshot can also + be specified as a copy source. Required. + :type copy_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _file_permission_copy_mode = None + _ignore_read_only = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + _set_archive_attribute = None + _lease_id = None + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + _file_permission_copy_mode = copy_file_smb_info.file_permission_copy_mode + _ignore_read_only = copy_file_smb_info.ignore_read_only + _set_archive_attribute = copy_file_smb_info.set_archive_attribute + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_start_copy_request( + url=self._config.url, + copy_source=copy_source, + timeout=timeout, + metadata=metadata, + file_permission=file_permission, + file_permission_key=file_permission_key, + file_permission_copy_mode=_file_permission_copy_mode, + ignore_read_only=_ignore_read_only, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + set_archive_attribute=_set_archive_attribute, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + version=self._config.version, + template_url=self.start_copy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-copy-id"] = self._deserialize("str", response.headers.get("x-ms-copy-id")) + response_headers["x-ms-copy-status"] = self._deserialize("str", response.headers.get("x-ms-copy-status")) + + if 
cls: + return cls(pipeline_response, None, response_headers) + + start_copy.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def abort_copy( # pylint: disable=inconsistent-return-statements + self, + copy_id: str, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Aborts a pending Copy File operation, and leaves a destination file with zero length and full + metadata. + + :param copy_id: The copy identifier provided in the x-ms-copy-id header of the original Copy + File operation. Required. + :type copy_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "copy". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword copy_action_abort_constant: Abort. Default value is "abort". Note that overriding this + default value may result in unsupported behavior. + :paramtype copy_action_abort_constant: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["copy"] = kwargs.pop("comp", _params.pop("comp", "copy")) + copy_action_abort_constant: Literal["abort"] = kwargs.pop( + "copy_action_abort_constant", _headers.pop("x-ms-copy-action", "abort") + ) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_abort_copy_request( + url=self._config.url, + copy_id=copy_id, + timeout=timeout, + lease_id=_lease_id, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + copy_action_abort_constant=copy_action_abort_constant, + version=self._config.version, + template_url=self.abort_copy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", 
response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + abort_copy.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def list_handles( + self, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> _models.ListHandlesResponse: + """Lists handles for file. + + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. + :type maxresults: int + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :keyword comp: comp. Default value is "listhandles". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListHandlesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListHandlesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["listhandles"] = kwargs.pop("comp", _params.pop("comp", "listhandles")) + cls: ClsType[_models.ListHandlesResponse] = kwargs.pop("cls", None) + + request = build_list_handles_request( + url=self._config.url, + marker=marker, + maxresults=maxresults, + timeout=timeout, + sharesnapshot=sharesnapshot, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.list_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["Content-Type"] = self._deserialize("str", response.headers.get("Content-Type")) + response_headers["x-ms-request-id"] = self._deserialize("str", 
response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ListHandlesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_handles.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def force_close_handles( # pylint: disable=inconsistent-return-statements + self, + handle_id: str, + timeout: Optional[int] = None, + marker: Optional[str] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any + ) -> None: + """Closes all handles open for given file. + + :param handle_id: Specifies handle ID opened on the file or directory to be closed. Asterisk + (‘*’) is a wildcard that specifies all handles. Required. + :type handle_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :keyword comp: comp. Default value is "forceclosehandles". Note that overriding this default + value may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["forceclosehandles"] = kwargs.pop("comp", _params.pop("comp", "forceclosehandles")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_force_close_handles_request( + url=self._config.url, + handle_id=handle_id, + timeout=timeout, + marker=marker, + sharesnapshot=sharesnapshot, + allow_trailing_dot=self._config.allow_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.force_close_handles.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-marker"] = self._deserialize("str", response.headers.get("x-ms-marker")) + response_headers["x-ms-number-of-handles-closed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-closed") + ) + response_headers["x-ms-number-of-handles-failed"] = self._deserialize( + "int", response.headers.get("x-ms-number-of-handles-failed") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + force_close_handles.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} + + @distributed_trace + def rename( # pylint: disable=inconsistent-return-statements + self, + rename_source: str, + timeout: Optional[int] = None, + replace_if_exists: Optional[bool] = None, + ignore_read_only: Optional[bool] = None, + file_permission: str = "inherit", + file_permission_key: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + source_lease_access_conditions: Optional[_models.SourceLeaseAccessConditions] = None, + destination_lease_access_conditions: Optional[_models.DestinationLeaseAccessConditions] = None, + copy_file_smb_info: Optional[_models.CopyFileSmbInfo] = None, + file_http_headers: Optional[_models.FileHTTPHeaders] = None, + **kwargs: Any + ) -> None: + """Renames a file. + + :param rename_source: Required. Specifies the URI-style path of the source file, up to 2 KB in + length. Required. + :type rename_source: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. 
Default value is None. + :type timeout: int + :param replace_if_exists: Optional. A boolean value for if the destination file already exists, + whether this request will overwrite the file or not. If true, the rename will succeed and will + overwrite the destination file. If not provided or if false and the destination file does + exist, the request will not overwrite the destination file. If provided and the destination + file doesn’t exist, the rename will succeed. Note: This value does not override the + x-ms-file-copy-ignore-read-only header value. Default value is None. + :type replace_if_exists: bool + :param ignore_read_only: Optional. A boolean value that specifies whether the ReadOnly + attribute on a preexisting destination file should be respected. If true, the rename will + succeed, otherwise, a previous file at the destination with the ReadOnly attribute set will + cause the rename to fail. Default value is None. + :type ignore_read_only: bool + :param file_permission: If specified the permission (security descriptor) shall be set for the + directory/file. This header can be used if Permission size is <= 8KB, else + x-ms-file-permission-key header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. Default value is "inherit". + :type file_permission: str + :param file_permission_key: Key of the permission to be set for the directory/file. Note: Only + one of the x-ms-file-permission or x-ms-file-permission-key should be specified. Default value + is None. + :type file_permission_key: str + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param source_lease_access_conditions: Parameter group. Default value is None. + :type source_lease_access_conditions: + ~azure.storage.fileshare.models.SourceLeaseAccessConditions + :param destination_lease_access_conditions: Parameter group. Default value is None. + :type destination_lease_access_conditions: + ~azure.storage.fileshare.models.DestinationLeaseAccessConditions + :param copy_file_smb_info: Parameter group. Default value is None. + :type copy_file_smb_info: ~azure.storage.fileshare.models.CopyFileSmbInfo + :param file_http_headers: Parameter group. Default value is None. + :type file_http_headers: ~azure.storage.fileshare.models.FileHTTPHeaders + :keyword comp: comp. Default value is "rename". Note that overriding this default value may + result in unsupported behavior. 
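+
+        A sketch of the public-wrapper equivalent (illustrative; names are
+        placeholders and ``file_client`` is assumed to exist):
+
+        .. code-block:: python
+
+            new_client = file_client.rename_file("dir2/renamed.bin", overwrite=False)
+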
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["rename"] = kwargs.pop("comp", _params.pop("comp", "rename")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _source_lease_id = None + _destination_lease_id = None + _file_attributes = None + _file_creation_time = None + _file_last_write_time = None + _file_change_time = None + _file_content_type = None + if source_lease_access_conditions is not None: + _source_lease_id = source_lease_access_conditions.source_lease_id + if destination_lease_access_conditions is not None: + _destination_lease_id = destination_lease_access_conditions.destination_lease_id + if copy_file_smb_info is not None: + _file_attributes = copy_file_smb_info.file_attributes + _file_change_time = copy_file_smb_info.file_change_time + _file_creation_time = copy_file_smb_info.file_creation_time + _file_last_write_time = copy_file_smb_info.file_last_write_time + if file_http_headers is not None: + _file_content_type = file_http_headers.file_content_type + + request = build_rename_request( + url=self._config.url, + rename_source=rename_source, + timeout=timeout, + replace_if_exists=replace_if_exists, + ignore_read_only=ignore_read_only, + source_lease_id=_source_lease_id, + destination_lease_id=_destination_lease_id, + file_attributes=_file_attributes, + file_creation_time=_file_creation_time, + file_last_write_time=_file_last_write_time, + file_change_time=_file_change_time, + file_permission=file_permission, + file_permission_key=file_permission_key, + metadata=metadata, + file_content_type=_file_content_type, + allow_trailing_dot=self._config.allow_trailing_dot, + allow_source_trailing_dot=self._config.allow_source_trailing_dot, + file_request_intent=self._config.file_request_intent, + comp=comp, + version=self._config.version, + template_url=self.rename.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-request-server-encrypted"] = self._deserialize( + "bool", 
response.headers.get("x-ms-request-server-encrypted") + ) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + response_headers["x-ms-file-attributes"] = self._deserialize( + "str", response.headers.get("x-ms-file-attributes") + ) + response_headers["x-ms-file-creation-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-creation-time") + ) + response_headers["x-ms-file-last-write-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-last-write-time") + ) + response_headers["x-ms-file-change-time"] = self._deserialize( + "str", response.headers.get("x-ms-file-change-time") + ) + response_headers["x-ms-file-id"] = self._deserialize("str", response.headers.get("x-ms-file-id")) + response_headers["x-ms-file-parent-id"] = self._deserialize("str", response.headers.get("x-ms-file-parent-id")) + + if cls: + return cls(pipeline_response, None, response_headers) + + rename.metadata = {"url": "{url}/{shareName}/{directory}/{fileName}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_patch.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_patch.py new file mode 100644 index 00000000000..f7dd3251033 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_patch.py @@ -0,0 +1,20 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +"""Customize generated code here. + +Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize +""" +from typing import List + +__all__: List[str] = [] # Add all objects you want publicly available to users at this package level + + +def patch_sdk(): + """Do not remove from this file. + + `patch_sdk` is a last resort escape hatch that allows you to do customizations + you can't accomplish using the techniques described in + https://aka.ms/azsdk/python/dpcodegen/python/customize + """ diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_service_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_service_operations.py new file mode 100644 index 00000000000..750b899dc9a --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_service_operations.py @@ -0,0 +1,414 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, List, Optional, TypeVar, Union + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_set_properties_request( + url: str, *, content: Any, timeout: Optional[int] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_properties_request(url: str, *, timeout: Optional[int] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + 
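# The fixed query parameters built below (restype=service, comp=properties) are what route + # this GET to the account-level File service properties endpoint; e.g. for a typical endpoint + # the request line would be GET https://<account>.file.core.windows.net/?restype=service&comp=properties, + # with the x-ms-version header selecting the 2022-11-02 REST API version. +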
# Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_list_shares_segment_request( + url: str, + *, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None, + timeout: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if prefix is not None: + _params["prefix"] = _SERIALIZER.query("prefix", prefix, "str") + if marker is not None: + _params["marker"] = _SERIALIZER.query("marker", marker, "str") + if maxresults is not None: + _params["maxresults"] = _SERIALIZER.query("maxresults", maxresults, "int", minimum=1) + if include is not None: + _params["include"] = _SERIALIZER.query("include", include, "[str]", div=",") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class ServiceOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.AzureFileStorage`'s + :attr:`service` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def set_properties( # pylint: disable=inconsistent-return-statements + self, storage_service_properties: _models.StorageServiceProperties, timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Sets properties for a storage account's File service endpoint, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param storage_service_properties: The StorageService properties. Required. 
+ :type storage_service_properties: ~azure.storage.fileshare.models.StorageServiceProperties + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _content = self._serialize.body(storage_service_properties, "StorageServiceProperties", is_xml=True) + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}"} + + @distributed_trace + def get_properties(self, timeout: Optional[int] = None, **kwargs: Any) -> _models.StorageServiceProperties: + """Gets the properties of a storage account's File service, including properties for Storage + Analytics metrics and CORS (Cross-Origin Resource Sharing) rules. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "service". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". 
Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: StorageServiceProperties or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.StorageServiceProperties + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["service"] = kwargs.pop("restype", _params.pop("restype", "service")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[_models.StorageServiceProperties] = kwargs.pop("cls", None) + + request = build_get_properties_request( + url=self._config.url, + timeout=timeout, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("StorageServiceProperties", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_properties.metadata = {"url": "{url}"} + + @distributed_trace + def list_shares_segment( + self, + prefix: Optional[str] = None, + marker: Optional[str] = None, + maxresults: Optional[int] = None, + include: Optional[List[Union[str, _models.ListSharesIncludeType]]] = None, + timeout: Optional[int] = None, + **kwargs: Any + ) -> _models.ListSharesResponse: + """The List Shares Segment operation returns a list of the shares and share snapshots under the + specified account. + + :param prefix: Filters the results to return only entries whose name begins with the specified + prefix. Default value is None. + :type prefix: str + :param marker: A string value that identifies the portion of the list to be returned with the + next list operation. The operation returns a marker value within the response body if the list + returned was not complete. The marker value may then be used in a subsequent call to request + the next set of list items. The marker value is opaque to the client. Default value is None. + :type marker: str + :param maxresults: Specifies the maximum number of entries to return. If the request does not + specify maxresults, or specifies a value greater than 5,000, the server will return up to 5,000 + items. Default value is None. 
+ :type maxresults: int + :param include: Include this parameter to specify one or more datasets to include in the + response. Default value is None. + :type include: list[str or ~azure.storage.fileshare.models.ListSharesIncludeType] + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword comp: comp. Default value is "list". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ListSharesResponse or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ListSharesResponse + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["list"] = kwargs.pop("comp", _params.pop("comp", "list")) + cls: ClsType[_models.ListSharesResponse] = kwargs.pop("cls", None) + + request = build_list_shares_segment_request( + url=self._config.url, + prefix=prefix, + marker=marker, + maxresults=maxresults, + include=include, + timeout=timeout, + comp=comp, + version=self._config.version, + template_url=self.list_shares_segment.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + + deserialized = self._deserialize("ListSharesResponse", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + list_shares_segment.metadata = {"url": "{url}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_share_operations.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_share_operations.py new file mode 100644 index 00000000000..e71518eb239 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_generated/operations/_share_operations.py @@ -0,0 +1,2466 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +import sys +from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload + +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceNotFoundError, + ResourceNotModifiedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.pipeline.transport import HttpResponse +from azure.core.rest import HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._serialization import Serializer +from .._vendor import _convert_request, _format_url_section + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_create_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + if quota is not None: + _headers["x-ms-share-quota"] = _SERIALIZER.header("quota", quota, "int", minimum=1) + if access_tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("access_tier", access_tier, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if enabled_protocols is not None: + _headers["x-ms-enabled-protocols"] = _SERIALIZER.header("enabled_protocols", enabled_protocols, "str") + if root_squash is not None: + _headers["x-ms-root-squash"] = _SERIALIZER.header("root_squash", root_squash, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_properties_request( + url: str, + *, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_id: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: 
Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_delete_request( + url: str, + *, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None, + lease_id: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if delete_snapshots is not None: + _headers["x-ms-delete-snapshots"] = _SERIALIZER.header("delete_snapshots", delete_snapshots, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_acquire_lease_request( + url: str, + *, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", 
"share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if duration is not None: + _headers["x-ms-lease-duration"] = _SERIALIZER.header("duration", duration, "int") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_release_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_change_lease_request( + url: str, + *, + 
lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if proposed_lease_id is not None: + _headers["x-ms-proposed-lease-id"] = _SERIALIZER.header("proposed_lease_id", proposed_lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_renew_lease_request( + url: str, + *, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-lease-action"] = 
_SERIALIZER.header("action", action, "str") + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_break_lease_request( + url: str, + *, + timeout: Optional[int] = None, + break_period: Optional[int] = None, + lease_id: Optional[str] = None, + request_id_parameter: Optional[str] = None, + sharesnapshot: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + if sharesnapshot is not None: + _params["sharesnapshot"] = _SERIALIZER.query("sharesnapshot", sharesnapshot, "str") + + # Construct headers + _headers["x-ms-lease-action"] = _SERIALIZER.header("action", action, "str") + if break_period is not None: + _headers["x-ms-lease-break-period"] = _SERIALIZER.header("break_period", break_period, "int") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_snapshot_request( + url: str, *, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # 
Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_create_permission_request( + url: str, + *, + timeout: Optional[int] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_permission_request( + url: str, + *, + file_permission_key: str, + timeout: Optional[int] = None, + file_request_intent: Optional[Union[str, _models.ShareTokenIntent]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + 
_params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-file-permission-key"] = _SERIALIZER.header("file_permission_key", file_permission_key, "str") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if file_request_intent is not None: + _headers["x-ms-file-request-intent"] = _SERIALIZER.header("file_request_intent", file_request_intent, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_properties_request( + url: str, + *, + timeout: Optional[int] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + lease_id: Optional[str] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if quota is not None: + _headers["x-ms-share-quota"] = _SERIALIZER.header("quota", quota, "int", minimum=1) + if access_tier is not None: + _headers["x-ms-access-tier"] = _SERIALIZER.header("access_tier", access_tier, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + if root_squash is not None: + _headers["x-ms-root-squash"] = _SERIALIZER.header("root_squash", root_squash, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_metadata_request( + url: str, + *, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_id: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + 
_params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + if metadata is not None: + _headers["x-ms-meta"] = _SERIALIZER.header("metadata", metadata, "{str}") + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_get_access_policy_request( + url: str, *, timeout: Optional[int] = None, lease_id: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_set_access_policy_request( + url: str, *, timeout: Optional[int] = None, lease_id: Optional[str] = None, content: Any = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = 
_SERIALIZER.header("lease_id", lease_id, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, content=content, **kwargs) + + +def build_get_statistics_request( + url: str, *, timeout: Optional[int] = None, lease_id: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if lease_id is not None: + _headers["x-ms-lease-id"] = _SERIALIZER.header("lease_id", lease_id, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_restore_request( + url: str, + *, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_share_name: Optional[str] = None, + deleted_share_version: Optional[str] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete")) + version: Literal["2022-11-02"] = kwargs.pop("version", _headers.pop("x-ms-version", "2022-11-02")) + accept = _headers.pop("Accept", "application/xml") + + # Construct URL + _url = kwargs.pop("template_url", "{url}/{shareName}") + path_format_arguments = { + "url": _SERIALIZER.url("url", url, "str", skip_quote=True), + } + + _url: str = _format_url_section(_url, **path_format_arguments) # type: ignore + + # Construct parameters + _params["restype"] = _SERIALIZER.query("restype", restype, "str") + _params["comp"] = _SERIALIZER.query("comp", comp, "str") + if timeout is not None: + _params["timeout"] = _SERIALIZER.query("timeout", timeout, "int", minimum=0) + + # Construct headers + _headers["x-ms-version"] = _SERIALIZER.header("version", version, "str") + if request_id_parameter is not None: + _headers["x-ms-client-request-id"] = _SERIALIZER.header("request_id_parameter", request_id_parameter, "str") + if deleted_share_name is not None: + _headers["x-ms-deleted-share-name"] = _SERIALIZER.header("deleted_share_name", deleted_share_name, "str") + if deleted_share_version is not None: + _headers["x-ms-deleted-share-version"] = _SERIALIZER.header( + 
"deleted_share_version", deleted_share_version, "str" + ) + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +class ShareOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.storage.fileshare.AzureFileStorage`'s + :attr:`share` attribute. + """ + + models = _models + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def create( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + enabled_protocols: Optional[str] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + **kwargs: Any + ) -> None: + """Creates a new share under the specified account. If the share with the same name already + exists, the operation fails. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None. + :type quota: int + :param access_tier: Specifies the access tier of the share. Known values are: + "TransactionOptimized", "Hot", and "Cool". Default value is None. + :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param enabled_protocols: Protocols to enable on the share. Default value is None. + :type enabled_protocols: str + :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values + are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None. + :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + enabled_protocols=enabled_protocols, + root_squash=root_squash, + restype=restype, + version=self._config.version, + template_url=self.create.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def get_properties( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Returns all user-defined metadata and system properties for the specified share or share + snapshot. The data returned does not include the share's list of files. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_properties_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + version=self._config.version, + template_url=self.get_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-meta"] = self._deserialize("{str}", response.headers.get("x-ms-meta")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-share-quota"] = self._deserialize("int", response.headers.get("x-ms-share-quota")) + response_headers["x-ms-share-provisioned-iops"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-iops") + ) + response_headers["x-ms-share-provisioned-ingress-mbps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-ingress-mbps") + ) + response_headers["x-ms-share-provisioned-egress-mbps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-egress-mbps") + ) + response_headers["x-ms-share-next-allowed-quota-downgrade-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-share-next-allowed-quota-downgrade-time") + ) + response_headers["x-ms-share-provisioned-bandwidth-mibps"] = self._deserialize( + "int", response.headers.get("x-ms-share-provisioned-bandwidth-mibps") + ) + response_headers["x-ms-lease-duration"] = self._deserialize("str", response.headers.get("x-ms-lease-duration")) + response_headers["x-ms-lease-state"] = self._deserialize("str", response.headers.get("x-ms-lease-state")) + response_headers["x-ms-lease-status"] = self._deserialize("str", response.headers.get("x-ms-lease-status")) + response_headers["x-ms-access-tier"] = self._deserialize("str", 
response.headers.get("x-ms-access-tier")) + response_headers["x-ms-access-tier-change-time"] = self._deserialize( + "rfc-1123", response.headers.get("x-ms-access-tier-change-time") + ) + response_headers["x-ms-access-tier-transition-state"] = self._deserialize( + "str", response.headers.get("x-ms-access-tier-transition-state") + ) + response_headers["x-ms-enabled-protocols"] = self._deserialize( + "str", response.headers.get("x-ms-enabled-protocols") + ) + response_headers["x-ms-root-squash"] = self._deserialize("str", response.headers.get("x-ms-root-squash")) + + if cls: + return cls(pipeline_response, None, response_headers) + + get_properties.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + sharesnapshot: Optional[str] = None, + timeout: Optional[int] = None, + delete_snapshots: Optional[Union[str, _models.DeleteSnapshotsOptionType]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Operation marks the specified share or share snapshot for deletion. The share or share snapshot + and any files contained within it are later deleted during garbage collection. + + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param delete_snapshots: Specifies the option include to delete the base share and all of its + snapshots. Known values are: "include" and "include-leased". Default value is None. + :type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_delete_request( + url=self._config.url, + sharesnapshot=sharesnapshot, + timeout=timeout, + delete_snapshots=delete_snapshots, + lease_id=_lease_id, + restype=restype, + version=self._config.version, + template_url=self.delete.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + delete.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def acquire_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + duration: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a + lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease + duration cannot be changed using renew or change. Default value is None. + :type duration: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. 
+ :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "acquire". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["acquire"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "acquire")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_acquire_lease_request( + url=self._config.url, + timeout=timeout, + duration=duration, + proposed_lease_id=proposed_lease_id, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.acquire_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + acquire_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def release_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: 
str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "release". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["release"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "release")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_release_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.release_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-client-request-id"] = 
self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + release_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def change_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + proposed_lease_id: Optional[str] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns + 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid + Constructor (String) for a list of valid GUID string formats. Default value is None. + :type proposed_lease_id: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "change". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["change"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "change")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_change_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + proposed_lease_id=proposed_lease_id, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.change_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + change_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def renew_lease( # pylint: disable=inconsistent-return-statements + self, + lease_id: str, + timeout: Optional[int] = None, + sharesnapshot: Optional[str] = None, + request_id_parameter: Optional[str] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param lease_id: Specifies the current lease ID on the resource. Required. + :type lease_id: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. 
+ :type sharesnapshot: str + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "renew". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["renew"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "renew")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_renew_lease_request( + url=self._config.url, + lease_id=lease_id, + timeout=timeout, + sharesnapshot=sharesnapshot, + request_id_parameter=request_id_parameter, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.renew_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + renew_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def break_lease( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + break_period: Optional[int] = 
None, + request_id_parameter: Optional[str] = None, + sharesnapshot: Optional[str] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """The Lease Share operation establishes and manages a lock on a share, or the specified snapshot + for set and delete share operations. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param break_period: For a break operation, proposed duration the lease should continue before + it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter + than the time remaining on the lease. If longer, the time remaining on the lease is used. A new + lease will not be available before the break period has expired, but the lease may be held for + longer than the break period. If this header does not appear with a break operation, a + fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease + breaks immediately. Default value is None. + :type break_period: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present, + specifies the share snapshot to query. Default value is None. + :type sharesnapshot: str + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword comp: comp. Default value is "lease". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword action: Describes what lease action to take. Default value is "break". Note that + overriding this default value may result in unsupported behavior. + :paramtype action: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + comp: Literal["lease"] = kwargs.pop("comp", _params.pop("comp", "lease")) + action: Literal["break"] = kwargs.pop("action", _headers.pop("x-ms-lease-action", "break")) + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_break_lease_request( + url=self._config.url, + timeout=timeout, + break_period=break_period, + lease_id=_lease_id, + request_id_parameter=request_id_parameter, + sharesnapshot=sharesnapshot, + comp=comp, + action=action, + restype=restype, + version=self._config.version, + template_url=self.break_lease.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-lease-time"] = self._deserialize("int", response.headers.get("x-ms-lease-time")) + response_headers["x-ms-lease-id"] = self._deserialize("str", response.headers.get("x-ms-lease-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + break_lease.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def create_snapshot( # pylint: disable=inconsistent-return-statements + self, timeout: Optional[int] = None, metadata: Optional[Dict[str, str]] = None, **kwargs: Any + ) -> None: + """Creates a read-only snapshot of a share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :keyword restype: restype. Default value is "share". 
Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "snapshot". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["snapshot"] = kwargs.pop("comp", _params.pop("comp", "snapshot")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_create_snapshot_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.create_snapshot.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-snapshot"] = self._deserialize("str", response.headers.get("x-ms-snapshot")) + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_snapshot.metadata = {"url": "{url}/{shareName}"} + + @overload + def create_permission( # pylint: disable=inconsistent-return-statements + self, + share_permission: _models.SharePermission, + timeout: Optional[int] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Required. + :type share_permission: ~azure.storage.fileshare.models.SharePermission + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_permission( # pylint: disable=inconsistent-return-statements + self, + share_permission: IO, + timeout: Optional[int] = None, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Required. + :type share_permission: IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_permission( # pylint: disable=inconsistent-return-statements + self, share_permission: Union[_models.SharePermission, IO], timeout: Optional[int] = None, **kwargs: Any + ) -> None: + """Create a permission (a security descriptor). + + :param share_permission: A permission (a security descriptor) at the share level. Is either a + SharePermission type or a IO type. Required. + :type share_permission: ~azure.storage.fileshare.models.SharePermission or IO + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. + :paramtype comp: str + :keyword content_type: Body Parameter content-type. Known values are: 'application/json'. + Default value is None. 
+ :paramtype content_type: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _json = None + _content = None + if isinstance(share_permission, (IO, bytes)): + _content = share_permission + else: + _json = self._serialize.body(share_permission, "SharePermission") + + request = build_create_permission_request( + url=self._config.url, + timeout=timeout, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + json=_json, + content=_content, + template_url=self.create_permission.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + response_headers["x-ms-file-permission-key"] = self._deserialize( + "str", response.headers.get("x-ms-file-permission-key") + ) + + if cls: + return cls(pipeline_response, None, response_headers) + + create_permission.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def get_permission( + self, file_permission_key: str, timeout: Optional[int] = None, **kwargs: Any + ) -> _models.SharePermission: + """Returns the permission (security descriptor) for a given key. + + :param file_permission_key: Key of the permission to be set for the directory/file. Required. + :type file_permission_key: str + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "filepermission". Note that overriding this default value + may result in unsupported behavior. 
+ :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: SharePermission or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.SharePermission + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["filepermission"] = kwargs.pop("comp", _params.pop("comp", "filepermission")) + cls: ClsType[_models.SharePermission] = kwargs.pop("cls", None) + + request = build_get_permission_request( + url=self._config.url, + file_permission_key=file_permission_key, + timeout=timeout, + file_request_intent=self._config.file_request_intent, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_permission.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("SharePermission", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_permission.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def set_properties( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + quota: Optional[int] = None, + access_tier: Optional[Union[str, _models.ShareAccessTier]] = None, + root_squash: Optional[Union[str, _models.ShareRootSquash]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets properties for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param quota: Specifies the maximum size of the share, in gigabytes. Default value is None. + :type quota: int + :param access_tier: Specifies the access tier of the share. Known values are: + "TransactionOptimized", "Hot", and "Cool". Default value is None. + :type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :param root_squash: Root squash to set on the share. Only valid for NFS shares. Known values + are: "NoRootSquash", "RootSquash", and "AllSquash". Default value is None. 
+ :type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "properties". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["properties"] = kwargs.pop("comp", _params.pop("comp", "properties")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_properties_request( + url=self._config.url, + timeout=timeout, + quota=quota, + access_tier=access_tier, + lease_id=_lease_id, + root_squash=root_squash, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_properties.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_properties.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def set_metadata( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + metadata: Optional[Dict[str, str]] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> None: + """Sets one or more user-defined name-value pairs for the specified share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. 
+ :type timeout: int + :param metadata: A name-value pair to associate with a file storage object. Default value is + None. + :type metadata: dict[str, str] + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "metadata". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["metadata"] = kwargs.pop("comp", _params.pop("comp", "metadata")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_set_metadata_request( + url=self._config.url, + timeout=timeout, + metadata=metadata, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.set_metadata.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_metadata.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def get_access_policy( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> List[_models.SignedIdentifier]: + """Returns information about stored access policies specified on the share. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. 
+ :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: list of SignedIdentifier or the result of cls(response) + :rtype: list[~azure.storage.fileshare.models.SignedIdentifier] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + cls: ClsType[List[_models.SignedIdentifier]] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("[SignedIdentifier]", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_access_policy.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def set_access_policy( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + share_acl: Optional[List[_models.SignedIdentifier]] = None, + **kwargs: Any + ) -> None: + """Sets a stored access policy for use with shared access signatures. + + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. 
Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :param share_acl: The ACL for the share. Default value is None. + :type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier] + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "acl". Note that overriding this default value may result + in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["acl"] = kwargs.pop("comp", _params.pop("comp", "acl")) + content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/xml")) + cls: ClsType[None] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + serialization_ctxt = {"xml": {"name": "SignedIdentifiers", "wrapped": True}} + if share_acl is not None: + _content = self._serialize.body( + share_acl, "[SignedIdentifier]", is_xml=True, serialization_ctxt=serialization_ctxt + ) + else: + _content = None + + request = build_set_access_policy_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + content_type=content_type, + version=self._config.version, + content=_content, + template_url=self.set_access_policy.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + set_access_policy.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def get_statistics( + self, + timeout: Optional[int] = None, + lease_access_conditions: Optional[_models.LeaseAccessConditions] = None, + **kwargs: Any + ) -> _models.ShareStats: + """Retrieves statistics related to the share. 
+ + :param timeout: The timeout parameter is expressed in seconds. For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param lease_access_conditions: Parameter group. Default value is None. + :type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "stats". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: ShareStats or the result of cls(response) + :rtype: ~azure.storage.fileshare.models.ShareStats + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["stats"] = kwargs.pop("comp", _params.pop("comp", "stats")) + cls: ClsType[_models.ShareStats] = kwargs.pop("cls", None) + + _lease_id = None + if lease_access_conditions is not None: + _lease_id = lease_access_conditions.lease_id + + request = build_get_statistics_request( + url=self._config.url, + timeout=timeout, + lease_id=_lease_id, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.get_statistics.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + deserialized = self._deserialize("ShareStats", pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + + get_statistics.metadata = {"url": "{url}/{shareName}"} + + @distributed_trace + def restore( # pylint: disable=inconsistent-return-statements + self, + timeout: Optional[int] = None, + request_id_parameter: Optional[str] = None, + deleted_share_name: Optional[str] = None, + deleted_share_version: Optional[str] = None, + **kwargs: Any + ) -> None: + """Restores a previously deleted Share. + + :param timeout: The timeout parameter is expressed in seconds. 
For more information, see + :code:`Setting + Timeouts for File Service Operations.`. Default value is None. + :type timeout: int + :param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character + limit that is recorded in the analytics logs when storage analytics logging is enabled. Default + value is None. + :type request_id_parameter: str + :param deleted_share_name: Specifies the name of the previously-deleted share. Default value is + None. + :type deleted_share_name: str + :param deleted_share_version: Specifies the version of the previously-deleted share. Default + value is None. + :type deleted_share_version: str + :keyword restype: restype. Default value is "share". Note that overriding this default value + may result in unsupported behavior. + :paramtype restype: str + :keyword comp: comp. Default value is "undelete". Note that overriding this default value may + result in unsupported behavior. + :paramtype comp: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: None or the result of cls(response) + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + restype: Literal["share"] = kwargs.pop("restype", _params.pop("restype", "share")) + comp: Literal["undelete"] = kwargs.pop("comp", _params.pop("comp", "undelete")) + cls: ClsType[None] = kwargs.pop("cls", None) + + request = build_restore_request( + url=self._config.url, + timeout=timeout, + request_id_parameter=request_id_parameter, + deleted_share_name=deleted_share_name, + deleted_share_version=deleted_share_version, + restype=restype, + comp=comp, + version=self._config.version, + template_url=self.restore.metadata["url"], + headers=_headers, + params=_params, + ) + request = _convert_request(request) + request.url = self._client.format_url(request.url) + + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + request, stream=False, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers["ETag"] = self._deserialize("str", response.headers.get("ETag")) + response_headers["Last-Modified"] = self._deserialize("rfc-1123", response.headers.get("Last-Modified")) + response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id")) + response_headers["x-ms-client-request-id"] = self._deserialize( + "str", response.headers.get("x-ms-client-request-id") + ) + response_headers["x-ms-version"] = self._deserialize("str", response.headers.get("x-ms-version")) + response_headers["Date"] = self._deserialize("rfc-1123", response.headers.get("Date")) + + if cls: + return cls(pipeline_response, None, response_headers) + + restore.metadata = {"url": "{url}/{shareName}"} diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_lease.py 
b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_lease.py
new file mode 100644
index 00000000000..883fff2d7f7
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_lease.py
@@ -0,0 +1,257 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import uuid
+
+from typing import ( # pylint: disable=unused-import
+ Union, Optional, Any, TypeVar, TYPE_CHECKING
+)
+
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.exceptions import HttpResponseError
+
+from ._shared.response_handlers import return_response_headers, process_storage_error
+from ._generated.operations import FileOperations, ShareOperations
+
+if TYPE_CHECKING:
+ from datetime import datetime
+ ShareFileClient = TypeVar("ShareFileClient")
+ ShareClient = TypeVar("ShareClient")
+
+
+class ShareLeaseClient(object): # pylint: disable=client-accepts-api-version-keyword
+ """Creates a new ShareLeaseClient.
+
+ This client provides lease operations on a ShareClient or ShareFileClient.
+
+ :ivar str id:
+ The ID of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired.
+ :ivar str etag:
+ The ETag of the lease currently being maintained. This will be `None` if no
+ lease has yet been acquired or modified.
+ :ivar ~datetime.datetime last_modified:
+ The last modified timestamp of the lease currently being maintained.
+ This will be `None` if no lease has yet been acquired or modified.
+
+ :param client:
+ The client of the file or share to lease.
+ :type client: ~azure.storage.fileshare.ShareFileClient or
+ ~azure.storage.fileshare.ShareClient
+ :param str lease_id:
+ A string representing the lease ID of an existing lease. This value does not
+ need to be specified in order to acquire a new lease, or break one.
+ """
+ def __init__(
+ self, client, lease_id=None
+ ): # pylint: disable=missing-client-constructor-parameter-credential,missing-client-constructor-parameter-kwargs
+ # type: (Union[ShareFileClient, ShareClient], Optional[str]) -> None
+ self.id = lease_id or str(uuid.uuid4())
+ self.last_modified = None
+ self.etag = None
+ if hasattr(client, 'file_name'):
+ self._client = client._client.file # type: ignore # pylint: disable=protected-access
+ self._snapshot = None
+ elif hasattr(client, 'share_name'):
+ self._client = client._client.share
+ self._snapshot = client.snapshot
+ else:
+ raise TypeError("Lease must use ShareFileClient or ShareClient.")
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.release()
+
+ @distributed_trace
+ def acquire(self, **kwargs):
+ # type: (**Any) -> None
+ """Requests a new lease. This operation establishes and manages a lock on a
+ file or share for write and delete operations. If the file or share does not have an active lease,
+ the File or Share service creates a lease on the file or share and returns a new lease ID. If the
+ file or share has an active lease, you can only request a new lease using the active lease ID.
+
+ :keyword int lease_duration:
+ Specifies the duration of the lease, in seconds, or negative one
+ (-1) for a lease that never expires.
+ File leases never expire. A non-infinite share lease can be
+ between 15 and 60 seconds. A share lease duration cannot be changed
+ using renew or change. Default is -1 (infinite share lease).
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :rtype: None
+ """
+ try:
+ lease_duration = kwargs.pop('lease_duration', -1)
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ response = self._client.acquire_lease(
+ timeout=kwargs.pop('timeout', None),
+ duration=lease_duration,
+ proposed_lease_id=self.id,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+ self.etag = response.get('etag') # type: str
+
+ @distributed_trace
+ def renew(self, **kwargs):
+ # type: (Any) -> None
+ """Renews the share lease.
+
+ The share lease can be renewed if the lease ID specified in the
+ lease client matches that associated with the share. Note that
+ the lease may be renewed even if it has expired as long as the share
+ has not been leased again since the expiration of that lease. When you
+ renew a lease, the lease duration clock resets.
+
+ .. versionadded:: 12.6.0
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :return: None
+ """
+ if isinstance(self._client, FileOperations):
+ raise TypeError("Lease renewal operations are only valid for ShareClient.")
+ try:
+ response = self._client.renew_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ sharesnapshot=self._snapshot,
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def release(self, **kwargs):
+ # type: (Any) -> None
+ """Releases the lease. The lease may be released if the lease ID specified on the request matches
+ that associated with the share or file. Releasing the lease allows another client to immediately acquire
+ the lease for the share or file as soon as the release is complete.
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
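+
+        Example (editor's sketch of the full lease lifecycle; ``share_client``
+        is an assumed ~azure.storage.fileshare.ShareClient instance)::
+
+            lease = ShareLeaseClient(share_client)
+            lease.acquire(lease_duration=60)
+            try:
+                ...  # operate on the share while it is leased
+            finally:
+                lease.release()
+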
+ :return: None
+ """
+ try:
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ response = self._client.release_lease(
+ lease_id=self.id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def change(self, proposed_lease_id, **kwargs):
+ # type: (str, Any) -> None
+ """Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and
+ a new lease ID in x-ms-proposed-lease-id.
+
+ :param str proposed_lease_id:
+ Proposed lease ID, in a GUID string format. The File or Share service will raise an error
+ (Invalid request) if the proposed lease ID is not in the correct format.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :return: None
+ """
+ try:
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ response = self._client.change_lease(
+ lease_id=self.id,
+ proposed_lease_id=proposed_lease_id,
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ self.etag = response.get('etag') # type: str
+ self.id = response.get('lease_id') # type: str
+ self.last_modified = response.get('last_modified') # type: datetime
+
+ @distributed_trace
+ def break_lease(self, **kwargs):
+ # type: (Any) -> int
+ """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease;
+ the request is not required to specify a matching lease ID. An infinite lease breaks immediately.
+
+ Once a lease is broken, it cannot be changed.
+ When a lease is successfully broken, the response indicates the interval
+ in seconds until a new lease can be acquired.
+
+ :keyword int lease_break_period:
+ This is the proposed duration of seconds that the share lease
+ should continue before it is broken, between 0 and 60 seconds. This
+ break period is only used if it is shorter than the time remaining
+ on the share lease. If longer, the time remaining on the share lease is used.
+ A new share lease will not be available before the break period has
+ expired, but the share lease may be held for longer than the break
+ period. If this header does not appear with a break
+ operation, a fixed-duration share lease breaks after the remaining share lease
+ period elapses, and an infinite share lease breaks immediately.
+
+ .. versionadded:: 12.6.0
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :return: Approximate time remaining in the lease period, in seconds.
+ :rtype: int
+ """
+ try:
+ lease_break_period = kwargs.pop('lease_break_period', None)
+ if self._snapshot:
+ kwargs['sharesnapshot'] = self._snapshot
+ if isinstance(self._client, ShareOperations):
+ kwargs['break_period'] = lease_break_period
+ if isinstance(self._client, FileOperations) and lease_break_period:
+ raise TypeError("Setting a lease break period is only applicable to Share leases.")
+
+ response = self._client.break_lease(
+ timeout=kwargs.pop('timeout', None),
+ cls=return_response_headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return response.get('lease_time') # type: ignore
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_models.py
new file mode 100644
index 00000000000..ab0d874adcb
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_models.py
@@ -0,0 +1,1022 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods, too-many-instance-attributes
+# pylint: disable=super-init-not-called, too-many-lines
+from urllib.parse import unquote
+from enum import Enum
+
+from azure.core import CaseInsensitiveEnumMeta
+from azure.core.paging import PageIterator
+from azure.core.exceptions import HttpResponseError
+
+from ._parser import _parse_datetime_from_str
+from ._shared.response_handlers import return_context_and_deserialized, process_storage_error
+from ._shared.models import DictMixin, get_enum_value
+from ._generated.models import Metrics as GeneratedMetrics
+from ._generated.models import RetentionPolicy as GeneratedRetentionPolicy
+from ._generated.models import CorsRule as GeneratedCorsRule
+from ._generated.models import ShareProtocolSettings as GeneratedShareProtocolSettings
+from ._generated.models import ShareSmbSettings as GeneratedShareSmbSettings
+from ._generated.models import SmbMultichannel as GeneratedSmbMultichannel
+from ._generated.models import AccessPolicy as GenAccessPolicy
+from ._generated.models import DirectoryItem
+
+
+def _wrap_item(item):
+ if isinstance(item, DirectoryItem):
+ return {'name': item.name, 'is_directory': True}
+ return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False}
+
+
+class Metrics(GeneratedMetrics):
+ """A summary of request statistics grouped by API in hour or minute aggregates
+ for files.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :keyword str version: The version of Storage Analytics to configure.
+ :keyword bool enabled: Required. Indicates whether metrics are enabled for the
+ File service.
+ :keyword bool include_apis: Indicates whether metrics should generate summary
+ statistics for called API operations.
+ :keyword ~azure.storage.fileshare.RetentionPolicy retention_policy: Determines how long the associated data should
+ persist.
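+
+    Example (editor's sketch; values are illustrative)::
+
+        metrics = Metrics(
+            enabled=True,
+            include_apis=True,
+            retention_policy=RetentionPolicy(enabled=True, days=5),
+        )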
+ """ + + def __init__(self, **kwargs): + self.version = kwargs.get('version', u'1.0') + self.enabled = kwargs.get('enabled', False) + self.include_apis = kwargs.get('include_apis') + self.retention_policy = kwargs.get('retention_policy') or RetentionPolicy() + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + version=generated.version, + enabled=generated.enabled, + include_apis=generated.include_apis, + retention_policy=RetentionPolicy._from_generated(generated.retention_policy) # pylint: disable=protected-access + ) + + +class RetentionPolicy(GeneratedRetentionPolicy): + """The retention policy which determines how long the associated data should + persist. + + All required parameters must be populated in order to send to Azure. + + :param bool enabled: Required. Indicates whether a retention policy is enabled + for the storage service. + :param int days: Indicates the number of days that metrics or logging or + soft-deleted data should be retained. All data older than this value will + be deleted. + """ + + def __init__(self, enabled=False, days=None): + self.enabled = enabled + self.days = days + if self.enabled and (self.days is None): + raise ValueError("If policy is enabled, 'days' must be specified.") + + @classmethod + def _from_generated(cls, generated): + if not generated: + return cls() + return cls( + enabled=generated.enabled, + days=generated.days, + ) + + +class CorsRule(GeneratedCorsRule): + """CORS is an HTTP feature that enables a web application running under one + domain to access resources in another domain. Web browsers implement a + security restriction known as same-origin policy that prevents a web page + from calling APIs in a different domain; CORS provides a secure way to + allow one domain (the origin domain) to call APIs in another domain. + + All required parameters must be populated in order to send to Azure. + + :param list(str) allowed_origins: + A list of origin domains that will be allowed via CORS, or "*" to allow + all domains. The list of must contain at least one entry. Limited to 64 + origin domains. Each allowed origin can have up to 256 characters. + :param list(str) allowed_methods: + A list of HTTP methods that are allowed to be executed by the origin. + The list of must contain at least one entry. For Azure Storage, + permitted methods are DELETE, GET, HEAD, MERGE, POST, OPTIONS or PUT. + :keyword list(str) allowed_headers: + Defaults to an empty list. A list of headers allowed to be part of + the cross-origin request. Limited to 64 defined headers and 2 prefixed + headers. Each header can be up to 256 characters. + :keyword list(str) exposed_headers: + Defaults to an empty list. A list of response headers to expose to CORS + clients. Limited to 64 defined headers and two prefixed headers. Each + header can be up to 256 characters. + :keyword int max_age_in_seconds: + The number of seconds that the client/browser should cache a + preflight response. 
+ """ + + def __init__(self, allowed_origins, allowed_methods, **kwargs): + self.allowed_origins = ','.join(allowed_origins) + self.allowed_methods = ','.join(allowed_methods) + self.allowed_headers = ','.join(kwargs.get('allowed_headers', [])) + self.exposed_headers = ','.join(kwargs.get('exposed_headers', [])) + self.max_age_in_seconds = kwargs.get('max_age_in_seconds', 0) + + @classmethod + def _from_generated(cls, generated): + return cls( + [generated.allowed_origins], + [generated.allowed_methods], + allowed_headers=[generated.allowed_headers], + exposed_headers=[generated.exposed_headers], + max_age_in_seconds=generated.max_age_in_seconds, + ) + + +class ShareSmbSettings(GeneratedShareSmbSettings): + """ Settings for the SMB protocol. + + :keyword SmbMultichannel multichannel: Sets the multichannel settings. + """ + def __init__(self, **kwargs): + self.multichannel = kwargs.get('multichannel') + if self.multichannel is None: + raise ValueError("The value 'multichannel' must be specified.") + + +class SmbMultichannel(GeneratedSmbMultichannel): + """ Settings for Multichannel. + + :keyword bool enabled: If SMB Multichannel is enabled. + """ + def __init__(self, **kwargs): + self.enabled = kwargs.get('enabled') + if self.enabled is None: + raise ValueError("The value 'enabled' must be specified.") + + +class ShareProtocolSettings(GeneratedShareProtocolSettings): + """Protocol Settings class used by the set and get service properties methods in the share service. + + Contains protocol properties of the share service such as the SMB setting of the share service. + + :keyword SmbSettings smb: Sets SMB settings. + """ + def __init__(self, **kwargs): + self.smb = kwargs.get('smb') + if self.smb is None: + raise ValueError("The value 'smb' must be specified.") + + @classmethod + def _from_generated(cls, generated): + return cls( + smb=generated.smb) + + +class AccessPolicy(GenAccessPolicy): + """Access Policy class used by the set and get acl methods in each service. + + A stored access policy can specify the start time, expiry time, and + permissions for the Shared Access Signatures with which it's associated. + Depending on how you want to control access to your resource, you can + specify all of these parameters within the stored access policy, and omit + them from the URL for the Shared Access Signature. Doing so permits you to + modify the associated signature's behavior at any time, as well as to revoke + it. Or you can specify one or more of the access policy parameters within + the stored access policy, and the others on the URL. Finally, you can + specify all of the parameters on the URL. In this case, you can use the + stored access policy to revoke the signature, but not to modify its behavior. + + Together the Shared Access Signature and the stored access policy must + include all fields required to authenticate the signature. If any required + fields are missing, the request will fail. Likewise, if a field is specified + both in the Shared Access Signature URL and in the stored access policy, the + request will fail with status code 400 (Bad Request). + + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
+ :type permission: str or ~azure.storage.fileshare.FileSasPermissions or + ~azure.storage.fileshare.ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + """ + def __init__(self, permission=None, expiry=None, start=None): + self.start = start + self.expiry = expiry + self.permission = permission + + +class LeaseProperties(DictMixin): + """File or Share Lease Properties. + + :ivar str status: + The lease status of the file or share. Possible values: locked|unlocked + :ivar str state: + Lease state of the file or share. Possible values: available|leased|expired|breaking|broken + :ivar str duration: + When a file or share is leased, specifies whether the lease is of infinite or fixed duration. + """ + + def __init__(self, **kwargs): + self.status = get_enum_value(kwargs.get('x-ms-lease-status')) + self.state = get_enum_value(kwargs.get('x-ms-lease-state')) + self.duration = get_enum_value(kwargs.get('x-ms-lease-duration')) + + @classmethod + def _from_generated(cls, generated): + lease = cls() + lease.status = get_enum_value(generated.properties.lease_status) + lease.state = get_enum_value(generated.properties.lease_state) + lease.duration = get_enum_value(generated.properties.lease_duration) + return lease + + +class ContentSettings(DictMixin): + """Used to store the content settings of a file. + + :param str content_type: + The content type specified for the file. If no content type was + specified, the default content type is application/octet-stream. + :param str content_encoding: + If the content_encoding has previously been set + for the file, that value is stored. + :param str content_language: + If the content_language has previously been set + for the file, that value is stored. + :param str content_disposition: + content_disposition conveys additional information about how to + process the response payload, and also can be used to attach + additional metadata. If content_disposition has previously been set + for the file, that value is stored. + :param str cache_control: + If the cache_control has previously been set for + the file, that value is stored. + :param bytearray content_md5: + If the content_md5 has been set for the file, this response + header is stored so that the client can check for message content + integrity. 
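+
+    Example (editor's sketch; typically passed to an upload operation)::
+
+        settings = ContentSettings(
+            content_type='application/json',
+            cache_control='no-cache',
+        )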
+ """ + + def __init__( + self, content_type=None, content_encoding=None, + content_language=None, content_disposition=None, + cache_control=None, content_md5=None, **kwargs): + + self.content_type = content_type or kwargs.get('Content-Type') + self.content_encoding = content_encoding or kwargs.get('Content-Encoding') + self.content_language = content_language or kwargs.get('Content-Language') + self.content_md5 = content_md5 or kwargs.get('Content-MD5') + self.content_disposition = content_disposition or kwargs.get('Content-Disposition') + self.cache_control = cache_control or kwargs.get('Cache-Control') + + @classmethod + def _from_generated(cls, generated): + settings = cls() + settings.content_type = generated.properties.content_type or None + settings.content_encoding = generated.properties.content_encoding or None + settings.content_language = generated.properties.content_language or None + settings.content_md5 = generated.properties.content_md5 or None + settings.content_disposition = generated.properties.content_disposition or None + settings.cache_control = generated.properties.cache_control or None + return settings + + +class ShareProperties(DictMixin): + """Share's properties class. + + :ivar str name: + The name of the share. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the share was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int quota: + The allocated quota. + :ivar str access_tier: + The share's access tier. + :ivar dict metadata: A dict with name_value pairs to associate with the + share as metadata. + :ivar str snapshot: + Snapshot of the share. + :ivar bool deleted: + To indicate if this share is deleted or not. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar datetime deleted: + To indicate the deleted time of the deleted share. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar str version: + To indicate the version of deleted share. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar int remaining_retention_days: + To indicate how many remaining days the deleted share will be kept. + This is a service returned value, and the value will be set when list shared including deleted ones. + :ivar int provisioned_bandwidth: + Provisioned bandwidth in megabits/second. Only applicable to premium file accounts. + :ivar ~azure.storage.fileshare.models.ShareRootSquash or str root_squash: + Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :ivar list(str) protocols: + Indicates the protocols enabled on the share. The protocol can be either SMB or NFS. 
+ """ + + def __init__(self, **kwargs): + self.name = None + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.quota = kwargs.get('x-ms-share-quota') + self.access_tier = kwargs.get('x-ms-access-tier') + self.next_allowed_quota_downgrade_time = kwargs.get('x-ms-share-next-allowed-quota-downgrade-time') + self.metadata = kwargs.get('metadata') + self.snapshot = None + self.deleted = None + self.deleted_time = None + self.version = None + self.remaining_retention_days = None + self.provisioned_egress_mbps = kwargs.get('x-ms-share-provisioned-egress-mbps') + self.provisioned_ingress_mbps = kwargs.get('x-ms-share-provisioned-ingress-mbps') + self.provisioned_iops = kwargs.get('x-ms-share-provisioned-iops') + self.provisioned_bandwidth = kwargs.get('x-ms-share-provisioned-bandwidth-mibps') + self.lease = LeaseProperties(**kwargs) + self.protocols = [protocol.strip() for protocol in kwargs.get('x-ms-enabled-protocols', None).split(',')]\ + if kwargs.get('x-ms-enabled-protocols', None) else None + self.root_squash = kwargs.get('x-ms-root-squash', None) + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = generated.name + props.last_modified = generated.properties.last_modified + props.etag = generated.properties.etag + props.quota = generated.properties.quota + props.access_tier = generated.properties.access_tier + props.next_allowed_quota_downgrade_time = generated.properties.next_allowed_quota_downgrade_time + props.metadata = generated.metadata + props.snapshot = generated.snapshot + props.deleted = generated.deleted + props.deleted_time = generated.properties.deleted_time + props.version = generated.version + props.remaining_retention_days = generated.properties.remaining_retention_days + props.provisioned_egress_mbps = generated.properties.provisioned_egress_m_bps + props.provisioned_ingress_mbps = generated.properties.provisioned_ingress_m_bps + props.provisioned_iops = generated.properties.provisioned_iops + props.provisioned_bandwidth = generated.properties.provisioned_bandwidth_mi_bps + props.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access + props.protocols = [protocol.strip() for protocol in generated.properties.enabled_protocols.split(',')]\ + if generated.properties.enabled_protocols else None + props.root_squash = generated.properties.root_squash + + return props + + +class SharePropertiesPaged(PageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + prefix=self.prefix, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class Handle(DictMixin): + """A listed Azure Storage handle item. + + All required parameters must be populated in order to send to Azure. + + :keyword str handle_id: Required. XSMB service handle ID + :keyword str path: Required. File or directory name including full path starting + from share root + :keyword str file_id: Required. FileId uniquely identifies the file or + directory. + :keyword str parent_id: ParentId uniquely identifies the parent directory of the + object. + :keyword str session_id: Required. SMB session ID in context of which the file + handle was opened + :keyword str client_ip: Required. Client IP that opened the handle + :keyword ~datetime.datetime open_time: Required. Time when the session that previously opened + the handle has last been reconnected. (UTC) + :keyword ~datetime.datetime last_reconnect_time: Time handle was last connected to (UTC) + """ + + def __init__(self, **kwargs): + self.id = kwargs.get('handle_id') + self.path = kwargs.get('path') + self.file_id = kwargs.get('file_id') + self.parent_id = kwargs.get('parent_id') + self.session_id = kwargs.get('session_id') + self.client_ip = kwargs.get('client_ip') + self.open_time = kwargs.get('open_time') + self.last_reconnect_time = kwargs.get('last_reconnect_time') + + @classmethod + def _from_generated(cls, generated): + handle = cls() + handle.id = generated.handle_id + handle.path = unquote(generated.path.content) if generated.path.encoded else generated.path.content + handle.file_id = generated.file_id + handle.parent_id = generated.parent_id + handle.session_id = generated.session_id + handle.client_ip = generated.client_ip + handle.open_time = generated.open_time + handle.last_reconnect_time = generated.last_reconnect_time + return handle + + +class HandlesPaged(PageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. 
+ :vartype current_page: list(~azure.storage.fileshare.Handle)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param int results_per_page: The maximum number of handles to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
+ """
+ def __init__(self, command, results_per_page=None, continuation_token=None):
+ super(HandlesPaged, self).__init__(
+ get_next=self._get_next_cb,
+ extract_data=self._extract_data_cb,
+ continuation_token=continuation_token or ""
+ )
+ self._command = command
+ self.marker = None
+ self.results_per_page = results_per_page
+ self.location_mode = None
+ self.current_page = []
+
+ def _get_next_cb(self, continuation_token):
+ try:
+ return self._command(
+ marker=continuation_token or None,
+ maxresults=self.results_per_page,
+ cls=return_context_and_deserialized,
+ use_location=self.location_mode)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ def _extract_data_cb(self, get_next_return):
+ self.location_mode, self._response = get_next_return
+ self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access
+ return self._response.next_marker or None, self.current_page
+
+
+class DirectoryProperties(DictMixin):
+ """Directory's properties class.
+
+ :ivar str name:
+ The name of the directory.
+ :ivar ~datetime.datetime last_modified:
+ A datetime object representing the last time the directory was modified.
+ :ivar str etag:
+ The ETag contains a value that you can use to perform operations
+ conditionally.
+ :ivar bool server_encrypted:
+ Whether encryption is enabled.
+ :ivar dict metadata: A dict with name_value pairs to associate with the
+ directory as metadata.
+ :ivar change_time: Change time for the directory.
+ :vartype change_time: str or ~datetime.datetime
+ :ivar creation_time: Creation time for the directory.
+ :vartype creation_time: str or ~datetime.datetime
+ :ivar last_write_time: Last write time for the directory.
+ :vartype last_write_time: str or ~datetime.datetime
+ :ivar last_access_time: Last access time for the directory.
+ :vartype last_access_time: ~datetime.datetime
+ :ivar file_attributes:
+ The file system attributes for files and directories.
+ :vartype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+ :ivar permission_key: Key of the permission to be set for the
+ directory/file.
+ :vartype permission_key: str
+ :ivar file_id: Required. FileId uniquely identifies the file or
+ directory.
+ :vartype file_id: str
+ :ivar parent_id: ParentId uniquely identifies the parent directory of the
+ object.
+ :vartype parent_id: str
+ """
+
+ def __init__(self, **kwargs):
+ self.name = None
+ self.last_modified = kwargs.get('Last-Modified')
+ self.etag = kwargs.get('ETag')
+ self.server_encrypted = kwargs.get('x-ms-server-encrypted')
+ self.metadata = kwargs.get('metadata')
+ self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time'))
+ self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time'))
+ self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time'))
+ self.last_access_time = None
+ self.file_attributes = kwargs.get('x-ms-file-attributes')
+ self.permission_key = kwargs.get('x-ms-file-permission-key')
+ self.file_id = kwargs.get('x-ms-file-id')
+ self.parent_id = kwargs.get('x-ms-file-parent-id')
+ self.is_directory = True
+
+ @classmethod
+ def _from_generated(cls, generated):
+ props = cls()
+ props.name = unquote(generated.name.content) if generated.name.encoded else generated.name.content
+ props.file_id = generated.file_id
+ props.file_attributes = generated.attributes
+ props.last_modified = generated.properties.last_modified
+ props.creation_time = generated.properties.creation_time
+ props.last_access_time = generated.properties.last_access_time
+ props.last_write_time = generated.properties.last_write_time
+ props.change_time = generated.properties.change_time
+ props.etag = generated.properties.etag
+ props.permission_key = generated.permission_key
+ return props
+
+
+class DirectoryPropertiesPaged(PageIterator):
+ """An iterable for the contents of a directory.
+
+ This iterable yields DirectoryProperties and FileProperties items for the
+ contents of the directory. Every item exposes 'name' (str) and 'is_directory'
+ (bool) attributes; items that are files (is_directory=False) also carry a size.
+
+ :ivar str service_endpoint: The service URL.
+ :ivar str prefix: A file name prefix being used to filter the list.
+ :ivar str marker: The continuation token of the current page of results.
+ :ivar int results_per_page: The maximum number of results retrieved per API call.
+ :ivar str continuation_token: The continuation token to retrieve the next page of results.
+ :ivar str location_mode: The location mode being used to list results. The available
+ options include "primary" and "secondary".
+ :ivar current_page: The current page of listed results.
+ :vartype current_page: list(~azure.storage.fileshare.DirectoryProperties or ~azure.storage.fileshare.FileProperties)
+
+ :param callable command: Function to retrieve the next page of items.
+ :param str prefix: Filters the results to return only directories whose names
+ begin with the specified prefix.
+ :param int results_per_page: The maximum number of results to retrieve per
+ call.
+ :param str continuation_token: An opaque continuation token.
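+
+    Example (editor's sketch; ``list_cmd`` is an assumed partial wrapping the
+    generated directory-listing operation)::
+
+        for page in DirectoryPropertiesPaged(list_cmd):
+            for item in page:
+                print('dir' if item.is_directory else 'file', item.name)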
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + def _get_next_cb(self, continuation_token): + try: + return self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access + self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access + return self._response.next_marker or None, self.current_page + + +class FileProperties(DictMixin): + """File's properties class. + + :ivar str name: + The name of the file. + :ivar str path: + The path of the file. + :ivar str share: + The name of share. + :ivar str snapshot: + File snapshot. + :ivar int content_length: + Size of file in bytes. + :ivar dict metadata: A dict with name_value pairs to associate with the + file as metadata. + :ivar str file_type: + Type of the file. + :ivar ~datetime.datetime last_modified: + A datetime object representing the last time the file was modified. + :ivar str etag: + The ETag contains a value that you can use to perform operations + conditionally. + :ivar int size: + Size of file in bytes. + :ivar str content_range: + The range of bytes. + :ivar bool server_encrypted: + Whether encryption is enabled. + :ivar copy: + The copy properties. + :vartype copy: ~azure.storage.fileshare.CopyProperties + :ivar content_settings: + The content settings for the file. 
+ :vartype content_settings: ~azure.storage.fileshare.ContentSettings + """ + + def __init__(self, **kwargs): + self.name = kwargs.get('name') + self.path = None + self.share = None + self.snapshot = None + self.content_length = kwargs.get('Content-Length') + self.metadata = kwargs.get('metadata') + self.file_type = kwargs.get('x-ms-type') + self.last_modified = kwargs.get('Last-Modified') + self.etag = kwargs.get('ETag') + self.size = kwargs.get('Content-Length') + self.content_range = kwargs.get('Content-Range') + self.server_encrypted = kwargs.get('x-ms-server-encrypted') + self.copy = CopyProperties(**kwargs) + self.content_settings = ContentSettings(**kwargs) + self.lease = LeaseProperties(**kwargs) + self.change_time = _parse_datetime_from_str(kwargs.get('x-ms-file-change-time')) + self.creation_time = _parse_datetime_from_str(kwargs.get('x-ms-file-creation-time')) + self.last_write_time = _parse_datetime_from_str(kwargs.get('x-ms-file-last-write-time')) + self.last_access_time = None + self.file_attributes = kwargs.get('x-ms-file-attributes') + self.permission_key = kwargs.get('x-ms-file-permission-key') + self.file_id = kwargs.get('x-ms-file-id') + self.parent_id = kwargs.get('x-ms-file-parent-id') + self.is_directory = False + + @classmethod + def _from_generated(cls, generated): + props = cls() + props.name = unquote(generated.name.content) if generated.name.encoded else generated.name.content + props.file_id = generated.file_id + props.etag = generated.properties.etag + props.file_attributes = generated.attributes + props.last_modified = generated.properties.last_modified + props.creation_time = generated.properties.creation_time + props.last_access_time = generated.properties.last_access_time + props.last_write_time = generated.properties.last_write_time + props.change_time = generated.properties.change_time + props.size = generated.properties.content_length + props.permission_key = generated.permission_key + return props + + +class ShareProtocols(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enabled protocols on the share""" + SMB = "SMB" + NFS = "NFS" + + +class CopyProperties(DictMixin): + """File Copy Properties. + + :ivar str id: + String identifier for the last attempted Copy File operation where this file + was the destination file. This header does not appear if this file has never + been the destination in a Copy File operation, or if this file has been + modified after a concluded Copy File operation. + :ivar str source: + URL up to 2 KB in length that specifies the source file used in the last attempted + Copy File operation where this file was the destination file. This header does not + appear if this file has never been the destination in a Copy File operation, or if + this file has been modified after a concluded Copy File operation. + :ivar str status: + State of the copy operation identified by Copy ID, with these values: + success: + Copy completed successfully. + pending: + Copy is in progress. Check copy_status_description if intermittent, + non-fatal errors impede copy progress but don't cause failure. + aborted: + Copy was ended by Abort Copy File. + failed: + Copy failed. See copy_status_description for failure details. + :ivar str progress: + Contains the number of bytes copied and the total bytes in the source in the last + attempted Copy File operation where this file was the destination file. Can show + between 0 and Content-Length bytes copied. 
+ :ivar datetime completion_time:
+ Conclusion time of the last attempted Copy File operation where this file was the
+ destination file. This value can specify the time of a completed, aborted, or
+ failed copy attempt.
+ :ivar str status_description:
+ Only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
+ or non-fatal copy operation failure.
+ :ivar bool incremental_copy:
+ Copies the snapshot of the source file to a destination file.
+ The snapshot is copied such that only the differential changes between
+ it and the previously copied snapshot are transferred to the destination.
+ :ivar datetime destination_snapshot:
+ Included if the file is incremental copy or incremental copy snapshot,
+ if x-ms-copy-status is success. Snapshot time of the last successful
+ incremental copy snapshot for this file.
+ """
+
+ def __init__(self, **kwargs):
+ self.id = kwargs.get('x-ms-copy-id')
+ self.source = kwargs.get('x-ms-copy-source')
+ self.status = get_enum_value(kwargs.get('x-ms-copy-status'))
+ self.progress = kwargs.get('x-ms-copy-progress')
+ self.completion_time = kwargs.get('x-ms-copy-completion-time')
+ self.status_description = kwargs.get('x-ms-copy-status-description')
+ self.incremental_copy = kwargs.get('x-ms-incremental-copy')
+ self.destination_snapshot = kwargs.get('x-ms-copy-destination-snapshot')
+
+ @classmethod
+ def _from_generated(cls, generated):
+ copy = cls()
+ copy.id = generated.properties.copy_id or None
+ copy.status = get_enum_value(generated.properties.copy_status) or None
+ copy.source = generated.properties.copy_source or None
+ copy.progress = generated.properties.copy_progress or None
+ copy.completion_time = generated.properties.copy_completion_time or None
+ copy.status_description = generated.properties.copy_status_description or None
+ copy.incremental_copy = generated.properties.incremental_copy or None
+ copy.destination_snapshot = generated.properties.destination_snapshot or None
+ return copy
+
+
+class FileSasPermissions(object):
+ """FileSasPermissions class to be used with
+ generating shared access signature operations.
+
+ :param bool read:
+ Read the content, properties, metadata. Use the file as the source of a copy
+ operation.
+ :param bool create:
+ Create a new file or copy a file to a new file.
+ :param bool write:
+ Create or write content, properties, metadata. Resize the file. Use the file
+ as the destination of a copy operation within the same account.
+ :param bool delete:
+ Delete the file.
+ """
+ def __init__(self, read=False, create=False, write=False, delete=False):
+ self.read = read
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self._str = (('r' if self.read else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a FileSasPermissions from a string.
+
+ To specify read, create, write, or delete permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ create permissions, you would provide a string "rc".
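+
+        Example (editor's sketch)::
+
+            perms = FileSasPermissions.from_string('rc')
+            assert perms.read and perms.create
+            assert str(perms) == 'rc'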
+
+ :param str permission: The string which dictates the read, create,
+ write, or delete permissions
+ :return: A FileSasPermissions object
+ :rtype: ~azure.storage.fileshare.FileSasPermissions
+ """
+ p_read = 'r' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+
+ parsed = cls(p_read, p_create, p_write, p_delete)
+
+ return parsed
+
+
+class ShareSasPermissions(object):
+ """ShareSasPermissions class to be used with
+ generating shared access signature and access policy operations.
+
+ :param bool read:
+ Read the content, properties or metadata of any file in the share. Use any
+ file in the share as the source of a copy operation.
+ :param bool write:
+ For any file in the share, create or write content, properties or metadata.
+ Resize the file. Use the file as the destination of a copy operation within
+ the same account.
+ Note: You cannot grant permissions to read or write share properties or
+ metadata with a service SAS. Use an account SAS instead.
+ :param bool delete:
+ Delete any file in the share.
+ Note: You cannot grant permissions to delete a share with a service SAS. Use
+ an account SAS instead.
+ :param bool list:
+ List files and directories in the share.
+ :param bool create:
+ Create a new file in the share, or copy a file to a new file in the share.
+ """
+ def __init__(self, read=False, write=False, delete=False, list=False, create=False): # pylint: disable=redefined-builtin
+ self.read = read
+ self.create = create
+ self.write = write
+ self.delete = delete
+ self.list = list
+ self._str = (('r' if self.read else '') +
+ ('c' if self.create else '') +
+ ('w' if self.write else '') +
+ ('d' if self.delete else '') +
+ ('l' if self.list else ''))
+
+ def __str__(self):
+ return self._str
+
+ @classmethod
+ def from_string(cls, permission):
+ """Create a ShareSasPermissions from a string.
+
+ To specify read, create, write, delete, or list permissions you need only to
+ include the first letter of the word in the string. E.g. For read and
+ write permissions, you would provide a string "rw".
+
+ :param str permission: The string which dictates the read, create, write,
+ delete, or list permissions
+ :return: A ShareSasPermissions object
+ :rtype: ~azure.storage.fileshare.ShareSasPermissions
+ """
+ p_read = 'r' in permission
+ p_create = 'c' in permission
+ p_write = 'w' in permission
+ p_delete = 'd' in permission
+ p_list = 'l' in permission
+
+ parsed = cls(p_read, p_write, p_delete, p_list, p_create)
+
+ return parsed
+
+
+class NTFSAttributes(object):
+ """
+ Valid set of attributes to set for file or directory.
+ To set attribute for directory, 'Directory' should always be enabled except setting 'None' for directory.
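+
+    Example (editor's sketch)::
+
+        attrs = NTFSAttributes(read_only=True, archive=True)
+        assert str(attrs) == 'ReadOnly|Archive'
+        assert str(NTFSAttributes.from_string('Hidden|System')) == 'Hidden|System'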
+
+ :ivar bool read_only:
+ Enable/disable 'ReadOnly' attribute for DIRECTORY or FILE
+ :ivar bool hidden:
+ Enable/disable 'Hidden' attribute for DIRECTORY or FILE
+ :ivar bool system:
+ Enable/disable 'System' attribute for DIRECTORY or FILE
+ :ivar bool none:
+ Enable/disable 'None' attribute for DIRECTORY or FILE to clear all attributes of FILE/DIRECTORY
+ :ivar bool directory:
+ Enable/disable 'Directory' attribute for DIRECTORY
+ :ivar bool archive:
+ Enable/disable 'Archive' attribute for DIRECTORY or FILE
+ :ivar bool temporary:
+ Enable/disable 'Temporary' attribute for FILE
+ :ivar bool offline:
+ Enable/disable 'Offline' attribute for DIRECTORY or FILE
+ :ivar bool not_content_indexed:
+ Enable/disable 'NotContentIndexed' attribute for DIRECTORY or FILE
+ :ivar bool no_scrub_data:
+ Enable/disable 'NoScrubData' attribute for DIRECTORY or FILE
+ """
+ def __init__(self, read_only=False, hidden=False, system=False, none=False, directory=False, archive=False,
+ temporary=False, offline=False, not_content_indexed=False, no_scrub_data=False):
+
+ self.read_only = read_only
+ self.hidden = hidden
+ self.system = system
+ self.none = none
+ self.directory = directory
+ self.archive = archive
+ self.temporary = temporary
+ self.offline = offline
+ self.not_content_indexed = not_content_indexed
+ self.no_scrub_data = no_scrub_data
+ self._str = (('ReadOnly|' if self.read_only else '') +
+ ('Hidden|' if self.hidden else '') +
+ ('System|' if self.system else '') +
+ ('None|' if self.none else '') +
+ ('Directory|' if self.directory else '') +
+ ('Archive|' if self.archive else '') +
+ ('Temporary|' if self.temporary else '') +
+ ('Offline|' if self.offline else '') +
+ ('NotContentIndexed|' if self.not_content_indexed else '') +
+ ('NoScrubData|' if self.no_scrub_data else ''))
+
+ def __str__(self):
+ concatenated_params = self._str
+ return concatenated_params.strip('|')
+
+ @classmethod
+ def from_string(cls, string):
+ """Create a NTFSAttributes from a string.
+
+ To specify attributes you can pass in a string with the
+ desired attributes, e.g. "ReadOnly|Hidden|System"
+
+ :param str string: The string which dictates the attributes.
+ :return: A NTFSAttributes object
+ :rtype: ~azure.storage.fileshare.NTFSAttributes
+ """
+ read_only = "ReadOnly" in string
+ hidden = "Hidden" in string
+ system = "System" in string
+ none = "None" in string
+ directory = "Directory" in string
+ archive = "Archive" in string
+ temporary = "Temporary" in string
+ offline = "Offline" in string
+ not_content_indexed = "NotContentIndexed" in string
+ no_scrub_data = "NoScrubData" in string
+
+ parsed = cls(read_only, hidden, system, none, directory, archive, temporary, offline, not_content_indexed,
+ no_scrub_data)
+ parsed._str = string # pylint: disable = protected-access
+ return parsed
+
+
+def service_properties_deserialize(generated):
+ """Deserialize a ServiceProperties object into a dict.
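+
+    Example of the returned shape (editor's sketch; ``generated`` is a
+    deserialized ServiceProperties from the generated client)::
+
+        props = service_properties_deserialize(generated)
+        hour = props['hour_metrics']        # Metrics
+        cors_rules = props['cors']          # list of CorsRule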
+
+def service_properties_deserialize(generated):
+    """Deserialize a ServiceProperties object into a dict.
+    """
+    return {
+        'hour_metrics': Metrics._from_generated(generated.hour_metrics),  # pylint: disable=protected-access
+        'minute_metrics': Metrics._from_generated(generated.minute_metrics),  # pylint: disable=protected-access
+        'cors': [CorsRule._from_generated(cors) for cors in generated.cors],  # pylint: disable=protected-access
+        'protocol': ShareProtocolSettings._from_generated(generated.protocol),  # pylint: disable=protected-access
+    }
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_parser.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_parser.py
new file mode 100644
index 00000000000..bc6795321c5
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_parser.py
@@ -0,0 +1,44 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from datetime import datetime, timedelta
+
+_ERROR_TOO_MANY_FILE_PERMISSIONS = 'file_permission and file_permission_key should not be set at the same time'
+_FILE_PERMISSION_TOO_LONG = 'Size of file_permission is too large. file_permission should be <=8KB, else ' \
+                            'please use file_permission_key'
+
+
+def _get_file_permission(file_permission, file_permission_key, default_permission):
+    # If file_permission and file_permission_key are both empty, use default_permission.
+    # file_permission must be <=8KB; above that size, file_permission_key should be used instead.
+    if file_permission and len(str(file_permission).encode('utf-8')) > 8 * 1024:
+        raise ValueError(_FILE_PERMISSION_TOO_LONG)
+
+    if not file_permission:
+        if not file_permission_key:
+            return default_permission
+        return None
+
+    if not file_permission_key:
+        return file_permission
+
+    raise ValueError(_ERROR_TOO_MANY_FILE_PERMISSIONS)
+
+
+def _parse_datetime_from_str(string_datetime):
+    if not string_datetime:
+        return None
+    dt, _, us = string_datetime.partition(".")
+    dt = datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S")
+    us = int(us[:-2])  # microseconds: drop the seventh fractional digit and the trailing 'Z'
+    datetime_obj = dt + timedelta(microseconds=us)
+    return datetime_obj
+
+
+def _datetime_to_str(datetime_obj):
+    if not datetime_obj:
+        return None
+    return datetime_obj if isinstance(datetime_obj, str) else datetime_obj.isoformat() + '0Z'
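_get_file_permission above enforces that an inline SDDL permission and a permission key are mutually exclusive, and falls back to a default only when both are absent. A standalone mirror of that precedence (the 8KB size check is omitted here)::

    def resolve_permission(file_permission, file_permission_key, default_permission):
        if file_permission and file_permission_key:
            raise ValueError('file_permission and file_permission_key should not be set at the same time')
        if file_permission:
            return file_permission      # inline SDDL is sent as-is
        if file_permission_key:
            return None                 # the key is sent instead; no inline permission
        return default_permission       # neither supplied: fall back to the default

    assert resolve_permission(None, None, 'inherit') == 'inherit'
    assert resolve_permission(None, 'a-permission-key', 'inherit') is None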
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_serialize.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_serialize.py
new file mode 100644
index 00000000000..2daf76b2785
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_serialize.py
@@ -0,0 +1,176 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=no-self-use
+from typing import Any, Dict, TypeVar, TYPE_CHECKING
+
+from azure.core import MatchConditions
+
+from ._parser import _datetime_to_str, _get_file_permission
+from ._generated.models import (
+    SourceModifiedAccessConditions,
+    LeaseAccessConditions,
+    SourceLeaseAccessConditions,
+    DestinationLeaseAccessConditions,
+    CopyFileSmbInfo)
+
+if TYPE_CHECKING:
+    ShareLeaseClient = TypeVar("ShareLeaseClient")
+
+
+_SUPPORTED_API_VERSIONS = [
+    '2019-02-02',
+    '2019-07-07',
+    '2019-10-10',
+    '2019-12-12',
+    '2020-02-10',
+    '2020-04-08',
+    '2020-06-12',
+    '2020-08-04',
+    '2020-10-02',
+    '2021-02-12',
+    '2021-04-10',
+    '2021-06-08',
+    '2021-08-06',
+    '2021-12-02',
+    '2022-11-02'
+]
+
+
+def _get_match_headers(kwargs, match_param, etag_param):
+    # type: (Dict[str, Any], str, str) -> Tuple[Optional[str], Optional[str]]
+    # TODO: extract this method to a shared folder and add comments, so that share, datalake and blob can use it.
+    if_match = None
+    if_none_match = None
+    match_condition = kwargs.pop(match_param, None)
+    if match_condition == MatchConditions.IfNotModified:
+        if_match = kwargs.pop(etag_param, None)
+        if not if_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfPresent:
+        if_match = '*'
+    elif match_condition == MatchConditions.IfModified:
+        if_none_match = kwargs.pop(etag_param, None)
+        if not if_none_match:
+            raise ValueError("'{}' specified without '{}'.".format(match_param, etag_param))
+    elif match_condition == MatchConditions.IfMissing:
+        if_none_match = '*'
+    elif match_condition is None:
+        if etag_param in kwargs:
+            raise ValueError("'{}' specified without '{}'.".format(etag_param, match_param))
+    else:
+        raise TypeError("Invalid match condition: {}".format(match_condition))
+    return if_match, if_none_match
+
+
+def get_source_conditions(kwargs):
+    # type: (Dict[str, Any]) -> SourceModifiedAccessConditions
+    if_match, if_none_match = _get_match_headers(kwargs, 'source_match_condition', 'source_etag')
+    return SourceModifiedAccessConditions(
+        source_if_modified_since=kwargs.pop('source_if_modified_since', None),
+        source_if_unmodified_since=kwargs.pop('source_if_unmodified_since', None),
+        source_if_match=if_match or kwargs.pop('source_if_match', None),
+        source_if_none_match=if_none_match or kwargs.pop('source_if_none_match', None)
+    )
+
+
+def get_access_conditions(lease):
+    # type: (ShareLeaseClient or str) -> LeaseAccessConditions or None
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return LeaseAccessConditions(lease_id=lease_id) if lease_id else None
+
+
+def get_source_access_conditions(lease):
+    # type: (ShareLeaseClient or str) -> SourceLeaseAccessConditions or None
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return SourceLeaseAccessConditions(source_lease_id=lease_id) if lease_id else None
+
+
+def get_dest_access_conditions(lease):
+    # type: (ShareLeaseClient or str) -> DestinationLeaseAccessConditions or None
+    try:
+        lease_id = lease.id  # type: ignore
+    except AttributeError:
+        lease_id = lease  # type: ignore
+    return DestinationLeaseAccessConditions(destination_lease_id=lease_id) if lease_id else None
+
+
+def get_smb_properties(kwargs):
+    # type: (Dict[str, Any]) -> Dict[str, Any]
+    ignore_read_only = kwargs.pop('ignore_read_only', None)
+
set_archive_attribute = kwargs.pop('set_archive_attribute', None) + file_permission = kwargs.pop('file_permission', None) + file_permission_key = kwargs.pop('permission_key', None) + file_attributes = kwargs.pop('file_attributes', None) + file_creation_time = kwargs.pop('file_creation_time', None) + file_last_write_time = kwargs.pop('file_last_write_time', None) + file_change_time = kwargs.pop('file_change_time', None) + + file_permission_copy_mode = None + file_permission = _get_file_permission(file_permission, file_permission_key, None) + + if file_permission: + if file_permission.lower() == "source": + file_permission = None + file_permission_copy_mode = "source" + else: + file_permission_copy_mode = "override" + elif file_permission_key: + if file_permission_key.lower() == "source": + file_permission_key = None + file_permission_copy_mode = "source" + else: + file_permission_copy_mode = "override" + return { + 'file_permission': file_permission, + 'file_permission_key': file_permission_key, + 'copy_file_smb_info': CopyFileSmbInfo( + file_permission_copy_mode=file_permission_copy_mode, + ignore_read_only=ignore_read_only, + file_attributes=file_attributes, + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + set_archive_attribute=set_archive_attribute + ) + + } + + +def get_rename_smb_properties(kwargs): + # type: (dict[str, Any]) -> dict[str, Any] + file_permission = kwargs.pop('file_permission', None) + file_permission_key = kwargs.pop('permission_key', None) + file_attributes = kwargs.pop('file_attributes', None) + file_creation_time = kwargs.pop('file_creation_time', None) + file_last_write_time = kwargs.pop('file_last_write_time', None) + file_change_time = kwargs.pop('file_change_time', None) + + file_permission = _get_file_permission(file_permission, file_permission_key, None) + + return { + 'file_permission': file_permission, + 'file_permission_key': file_permission_key, + 'copy_file_smb_info': CopyFileSmbInfo( + file_attributes=file_attributes, + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time) + )} + + +def get_api_version(kwargs): + # type: (Dict[str, Any]) -> str + api_version = kwargs.get('api_version', None) + if api_version and api_version not in _SUPPORTED_API_VERSIONS: + versions = '\n'.join(_SUPPORTED_API_VERSIONS) + raise ValueError("Unsupported API version '{}'. Please select from:\n{}".format(api_version, versions)) + return api_version or _SUPPORTED_API_VERSIONS[-1] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_client.py new file mode 100644 index 00000000000..c6298d86521 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_client.py @@ -0,0 +1,1003 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
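get_api_version in _serialize.py above validates an explicitly requested service version against _SUPPORTED_API_VERSIONS and otherwise defaults to the last (newest) entry, so appending a version to that list is enough to change the default. A standalone sketch with an abbreviated list::

    SUPPORTED = ['2021-12-02', '2022-11-02']  # abbreviated; the vendored list has 15 entries

    def pick_api_version(kwargs):
        api_version = kwargs.get('api_version', None)
        if api_version and api_version not in SUPPORTED:
            raise ValueError("Unsupported API version '{}'.".format(api_version))
        return api_version or SUPPORTED[-1]   # newest supported version is the default

    assert pick_api_version({}) == '2022-11-02'
    assert pick_api_version({'api_version': '2021-12-02'}) == '2021-12-02'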
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +import sys +from typing import ( + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) +from urllib.parse import urlparse, quote, unquote + +from typing_extensions import Self + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.request_handlers import add_metadata_headers, serialize_iso +from ._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from ._generated import AzureFileStorage +from ._generated.models import ( + SignedIdentifier, + DeleteSnapshotsOptionType, + SharePermission) +from ._deserialize import deserialize_share_properties, deserialize_permission_key, deserialize_permission +from ._serialize import get_api_version, get_access_conditions +from ._directory_client import ShareDirectoryClient +from ._file_client import ShareFileClient +from ._lease import ShareLeaseClient +from ._models import ShareProtocols + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from ._models import ShareProperties, AccessPolicy + + +class ShareClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-methods + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. 
Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( + self, account_url: str, + share_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not share_name: + raise ValueError("Please specify a share name.") + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + path_snapshot = None + path_snapshot, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + try: + self.snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + self.snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + self.snapshot = snapshot or path_snapshot + + self.share_name = share_name + self._query_str, credential = self._format_query_string( + sas_token, credential, share_snapshot=self.snapshot) + super(ShareClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @classmethod + def from_share_url( + cls, share_url: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """ + :param str share_url: The full URI to the share. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. 
This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + """ + try: + if not share_url.lower().startswith('http'): + share_url = "https://" + share_url + except AttributeError: + raise ValueError("Share URL must be a string.") + parsed_url = urlparse(share_url.rstrip('/')) + if not (parsed_url.path and parsed_url.netloc): + raise ValueError("Invalid URL: {}".format(share_url)) + + share_path = parsed_url.path.lstrip('/').split('/') + account_path = "" + if len(share_path) > 1: + account_path = "/" + "/".join(share_path[:-1]) + account_url = "{}://{}{}?{}".format( + parsed_url.scheme, + parsed_url.netloc.rstrip('/'), + account_path, + parsed_url.query) + + share_name = unquote(share_path[-1]) + path_snapshot, _ = parse_query(parsed_url.query) + if snapshot: + try: + path_snapshot = snapshot.snapshot # type: ignore + except AttributeError: + try: + path_snapshot = snapshot['snapshot'] # type: ignore + except TypeError: + path_snapshot = snapshot + + if not share_name: + raise ValueError("Invalid URL. Please provide a URL with a valid share name") + return cls(account_url, share_name, path_snapshot, credential, **kwargs) + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + share_name = self.share_name + if isinstance(share_name, str): + share_name = share_name.encode('UTF-8') + return "{}://{}/{}{}".format( + self.scheme, + hostname, + quote(share_name), + self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str: str, + share_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ShareClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param share_name: The name of the share. + :type share_name: str + :param str snapshot: + The optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. 
+ :returns: A share client. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_client_from_conn_string] + :end-before: [END create_share_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Gets the share client from connection string. + """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls( + account_url, share_name=share_name, snapshot=snapshot, credential=credential, **kwargs) + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=_pipeline, + _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. + :returns: A File Client. + :rtype: ~azure.storage.fileshare.ShareFileClient + """ + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, token_intent=self.file_request_intent, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot) + + @distributed_trace + def acquire_lease(self, **kwargs): + # type: (**Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.5.0 + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. 
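get_directory_client and get_file_client above wrap the parent pipeline's transport in TransportWrapper rather than building a new pipeline from scratch, so sub-clients reuse the share client's existing connection, credential and configuration. A hypothetical usage sketch; it assumes the vendored package's __init__ re-exports ShareClient (only _share_client.py itself is shown in this diff), and the connection string is a placeholder::

    from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import ShareClient

    conn_str = "<storage account connection string>"  # placeholder
    share = ShareClient.from_connection_string(conn_str, share_name="myshare")

    # Sub-clients inherit hosts, config and the wrapped transport from `share`.
    dir_client = share.get_directory_client("dir1")
    file_client = share.get_file_client("dir1/file.txt")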
+ This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_and_release_lease_on_share] + :end-before: [END acquire_and_release_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. + """ + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + lease.acquire(**kwargs) + return lease + + @distributed_trace + def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword protocols: + Protocols to enable on the share. Only one protocol can be enabled on the share. + :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :returns: Share-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 8 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + access_tier = kwargs.pop('access_tier', None) + timeout = kwargs.pop('timeout', None) + root_squash = kwargs.pop('root_squash', None) + protocols = kwargs.pop('protocols', None) + if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: + raise ValueError("The enabled protocol must be set to either SMB or NFS.") + if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: + raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + enabled_protocols=protocols, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. 
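create_share above validates the protocol keywords client-side before any request is sent: only one protocol may be enabled, and root_squash is rejected unless that protocol is NFS. A standalone mirror of those checks (the ShareProtocols enum variants accepted by the real method are folded into plain strings here)::

    def validate_share_protocol(protocols=None, root_squash=None):
        if protocols and protocols not in ('SMB', 'NFS'):
            raise ValueError("The enabled protocol must be set to either SMB or NFS.")
        if root_squash and protocols != 'NFS':
            raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.")

    validate_share_protocol(protocols='NFS', root_squash='RootSquash')  # accepted
    try:
        validate_share_protocol(protocols='SMB', root_squash='RootSquash')
    except ValueError:
        pass  # rejected: root squash is NFS-only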
+ + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. + + A snapshot of a share has the same name as the base share from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START create_share_snapshot] + :end-before: [END create_share_snapshot] + :language: python + :dedent: 12 + :caption: Creates a snapshot of the file share. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return self._client.share.create_snapshot( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def delete_share( + self, delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. + + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START delete_share] + :end-before: [END delete_share] + :language: python + :dedent: 12 + :caption: Deletes the share and any snapshots. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + delete_include = None + if delete_snapshots: + delete_include = DeleteSnapshotsOptionType.include + try: + self._client.share.delete( + timeout=timeout, + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + delete_snapshots=delete_include, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_properties(self, **kwargs): + # type: (Any) -> ShareProperties + """Returns all user-defined metadata and system properties for the + specified share. The data returned does not include the shares's + list of files or directories. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: The share properties. + :rtype: ~azure.storage.fileshare.ShareProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world.py + :start-after: [START get_share_properties] + :end-before: [END get_share_properties] + :language: python + :dedent: 12 + :caption: Gets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace + def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share. + + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 12 + :caption: Sets the share quota. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=None, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_share_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Sets the share properties. + + .. versionadded:: 12.4.0 + + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', and 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :keyword int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. 
+ This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_properties] + :end-before: [END set_share_properties] + :language: python + :dedent: 12 + :caption: Sets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + access_tier = kwargs.pop('access_tier', None) + quota = kwargs.pop('quota', None) + root_squash = kwargs.pop('root_squash', None) + if all(parameter is None for parameter in [access_tier, quota, root_squash]): + raise ValueError("set_share_properties should be called with at least one parameter.") + try: + return self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. + + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 12 + :caption: Sets the share metadata. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. 
The permissions + indicate whether files in a share may be accessed publicly. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace + def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + try: + return self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. 
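set_share_access_policy above serializes each policy's start/expiry to ISO-8601 and rejects more than five stored policies locally, matching the service limit. A hypothetical sketch, assuming AccessPolicy is re-exported by the vendored package root and reusing the `share` client from the earlier sketch::

    from datetime import datetime, timedelta
    from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import AccessPolicy

    policy = AccessPolicy(
        permission='rl',  # read + list, in the single-letter form ShareSasPermissions parses
        start=datetime.utcnow(),
        expiry=datetime.utcnow() + timedelta(hours=1))

    # A dict with more than 5 entries raises ValueError before any request is made.
    share.set_share_access_policy(signed_identifiers={'read-list-1h': policy})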
+ + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :return: The approximate size of the data (in bytes) stored on the share. + :rtype: int + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + stats = self._client.share.get_statistics( + timeout=timeout, + lease_access_conditions=access_conditions, + **kwargs) + return stats.share_usage_bytes # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files( + self, directory_name=None, # type: Optional[str] + name_starts_with=None, # type: Optional[str] + marker=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Iterable[Dict[str,str]] + """Lists the directories and files under the share. + + :param str directory_name: + Name of a directory. + :param str name_starts_with: + Filters the results to return only directories whose names + begin with the specified prefix. + :param str marker: + An opaque continuation token. This value can be retrieved from the + next_marker field of a previous generator object. If specified, + this generator will begin returning results from this point. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START share_list_files_in_dir] + :end-before: [END share_list_files_in_dir] + :language: python + :dedent: 12 + :caption: List directories and files in the share. 
+ """ + timeout = kwargs.pop('timeout', None) + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + return directory.list_directories_and_files( + name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) + + @staticmethod + def _create_permission_for_share_options(file_permission, # type: str + **kwargs): + options = { + 'share_permission': SharePermission(permission=file_permission), + 'cls': deserialize_permission_key, + 'timeout': kwargs.pop('timeout', None), + } + options.update(kwargs) + return options + + @distributed_trace + def create_permission_for_share(self, file_permission, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Create a permission (a security descriptor) at the share level. + + This 'permission' can be used for the files/directories in the share. + If a 'permission' already exists, it shall return the key of it, else + creates a new permission at the share level and return its key. + + :param str file_permission: + File permission, a Portable SDDL + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A file permission key + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) + try: + return self._client.share.create_permission(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def get_permission_for_share( # type: ignore + self, permission_key, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Get a permission (a security descriptor) for a given key. + + This 'permission' can be used for the files/directories in the share. + + :param str permission_key: + Key of the file permission to retrieve + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A file permission (a portable SDDL) + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + try: + return self._client.share.get_permission( # type: ignore + file_permission_key=permission_key, + cls=deserialize_permission, + timeout=timeout, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def create_directory(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Creates a directory in the share and returns a client to interact + with the directory. + + :param str directory_name: + The name of the directory. + :keyword metadata: + Name-value pairs associated with the directory as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. 
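list_directories_and_files above is a thin delegation: it builds a ShareDirectoryClient for directory_name and forwards the paging keywords, so listing from the share client and from a directory client behave identically. Continuing the hypothetical `share` client from the earlier sketches (field access assumes the dict-like DirectoryProperties/FileProperties models)::

    for item in share.list_directories_and_files(directory_name="dir1", name_starts_with="log"):
        # 'size' is only present on files, hence the .get() fallback.
        print(item['name'], item.get('size'))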
+ :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + directory = self.get_directory_client(directory_name) + kwargs.setdefault('merge_span', True) + directory.create_directory(**kwargs) + return directory # type: ignore + + @distributed_trace + def delete_directory(self, directory_name, **kwargs): + # type: (str, Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :param str directory_name: + The name of the directory. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + """ + directory = self.get_directory_client(directory_name) + directory.delete_directory(**kwargs) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_service_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_service_client.py new file mode 100644 index 00000000000..a87dd0687de --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_share_service_client.py @@ -0,0 +1,475 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +import functools +from typing import ( + Union, Optional, Any, Dict, List, + TYPE_CHECKING +) +from urllib.parse import urlparse + +from typing_extensions import Self + +from azure.core.exceptions import HttpResponseError +from azure.core.paging import ItemPaged +from azure.core.tracing.decorator import distributed_trace +from azure.core.pipeline import Pipeline +from ._shared.base_client import StorageAccountHostsMixin, TransportWrapper, parse_connection_str, parse_query +from ._shared.response_handlers import process_storage_error +from ._generated import AzureFileStorage +from ._generated.models import StorageServiceProperties +from ._share_client import ShareClient +from ._serialize import get_api_version +from ._models import ( + SharePropertiesPaged, + service_properties_deserialize, +) + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from ._models import ( + ShareProperties, + Metrics, + CorsRule, + ShareProtocolSettings + ) + + +class ShareServiceClient(StorageAccountHostsMixin): + """A client to interact with the File Share Service at the account level. + + This client provides operations to retrieve and configure the account properties + as well as list, create and delete shares within the account. + For operations relating to a specific share, a client for that entity + can also be retrieved using the :func:`get_share_client` function. + + For more optional configuration, please click + `here `_. + + :param str account_url: + The URL to the file share storage account. Any other entities included + in the URL path (e.g. 
share or file) will be discarded. This URL can be optionally + authenticated with a SAS token. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client] + :end-before: [END create_share_service_client] + :language: python + :dedent: 8 + :caption: Create the share service client with url and credential. 
+ """ + def __init__( + self, account_url: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + try: + if not account_url.lower().startswith('http'): + account_url = "https://" + account_url + except AttributeError: + raise ValueError("Account URL must be a string.") + parsed_url = urlparse(account_url.rstrip('/')) + if not parsed_url.netloc: + raise ValueError("Invalid URL: {}".format(account_url)) + + _, sas_token = parse_query(parsed_url.query) + if not sas_token and not credential: + raise ValueError( + 'You need to provide either an account shared key or SAS token when creating a storage service.') + self._query_str, credential = self._format_query_string(sas_token, credential) + super(ShareServiceClient, self).__init__(parsed_url, service='file-share', credential=credential, **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + def _format_url(self, hostname): + """Format the endpoint URL according to the current location + mode hostname. + """ + return "{}://{}/{}".format(self.scheme, hostname, self._query_str) + + @classmethod + def from_connection_string( + cls, conn_str: str, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + **kwargs: Any + ) -> Self: + """Create ShareServiceClient from a Connection String. + + :param str conn_str: + A connection string to an Azure Storage account. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :returns: A File Share service client. + :rtype: ~azure.storage.fileshare.ShareServiceClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START create_share_service_client_from_conn_string] + :end-before: [END create_share_service_client_from_conn_string] + :language: python + :dedent: 8 + :caption: Create the share service client with connection string. 
+ """ + account_url, secondary, credential = parse_connection_str(conn_str, credential, 'file') + if 'secondary_hostname' not in kwargs: + kwargs['secondary_hostname'] = secondary + return cls(account_url, credential=credential, **kwargs) + + @distributed_trace + def get_service_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the properties of a storage account's File Share service, including + Azure Storage Analytics. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A dictionary containing file service properties such as + analytics logging, hour/minute metrics, cors rules, etc. + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_service_properties] + :end-before: [END get_service_properties] + :language: python + :dedent: 8 + :caption: Get file share service properties. + """ + timeout = kwargs.pop('timeout', None) + try: + service_props = self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + protocol=None, # type: Optional[ShareProtocolSettings] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :param protocol: + Sets protocol settings + :type protocol: ~azure.storage.fileshare.ShareProtocolSettings + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. 
+ """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + self._client.service.set_properties(storage_service_properties=props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> ItemPaged[ShareProperties] + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. + :param bool include_metadata: + Specifies that share metadata be returned in the response. + :param bool include_snapshots: + Specifies that share snapshot be returned in the response. + :keyword bool include_deleted: + Specifies that deleted shares be returned in the response. + This is only for share soft delete enabled account. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An iterable (auto-paging) of ShareProperties. + :rtype: ~azure.core.paging.ItemPaged[~azure.storage.fileshare.ShareProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START fsc_list_shares] + :end-before: [END fsc_list_shares] + :language: python + :dedent: 12 + :caption: List shares in the file share service. + """ + timeout = kwargs.pop('timeout', None) + include = [] + include_deleted = kwargs.pop('include_deleted', None) + if include_deleted: + include.append("deleted") + if include_metadata: + include.append('metadata') + if include_snapshots: + include.append('snapshots') + + results_per_page = kwargs.pop('results_per_page', None) + command = functools.partial( + self._client.service.list_shares_segment, + include=include, + timeout=timeout, + **kwargs) + return ItemPaged( + command, prefix=name_starts_with, results_per_page=results_per_page, + page_iterator_class=SharePropertiesPaged) + + @distributed_trace + def create_share( + self, share_name, # type: str + **kwargs + ): + # type: (...) -> ShareClient + """Creates a new share under the specified account. If the share + with the same name already exists, the operation fails. Returns a client with + which to interact with the newly created share. + + :param str share_name: The name of the share to create. + :keyword dict(str,str) metadata: + A dict with name_value pairs to associate with the + share as metadata. Example:{'Category':'test'} + :keyword int quota: + Quota in bytes. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: ~azure.storage.fileshare.ShareClient + + .. 
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 8
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
+        return share
+
+    @distributed_trace
+    def delete_share(
+        self, share_name,  # type: Union[ShareProperties, str]
+        delete_snapshots=False,  # type: Optional[bool]
+        **kwargs
+    ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 12
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
+
+    @distributed_trace
+    def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs):
+        # type: (str, str, **Any) -> ShareClient
+        """Restores a soft-deleted share.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.fileshare.ShareClient
+        """
+        share = self.get_share_client(deleted_share_name)
+
+        try:
+            share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable = protected-access
+                                        deleted_share_version=deleted_share_version,
+                                        timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_share_client(self, share, snapshot=None):
+        # type: (Union[ShareProperties, str],Optional[Union[Dict[str, Any], str]]) -> ShareClient
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share.
This can either be the name of the share, + or an instance of ShareProperties. + :type share: str or ~azure.storage.fileshare.ShareProperties + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :returns: A ShareClient. + :rtype: ~azure.storage.fileshare.ShareClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service.py + :start-after: [START get_share_client] + :end-before: [END get_share_client] + :language: python + :dedent: 8 + :caption: Gets the share client. + """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = Pipeline( + transport=TransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, + _configuration=self._config, _pipeline=_pipeline, _location_mode=self._location_mode, + allow_trailing_dot=self.allow_trailing_dot, allow_source_trailing_dot=self.allow_source_trailing_dot, + token_intent=self.file_request_intent) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/__init__.py new file mode 100644 index 00000000000..a8b1a27d48f --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/__init__.py @@ -0,0 +1,54 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import hmac + +try: + from urllib.parse import quote, unquote +except ImportError: + from urllib2 import quote, unquote # type: ignore + + +def url_quote(url): + return quote(url) + + +def url_unquote(url): + return unquote(url) + + +def encode_base64(data): + if isinstance(data, str): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def decode_base64_to_bytes(data): + if isinstance(data, str): + data = data.encode('utf-8') + return base64.b64decode(data) + + +def decode_base64_to_text(data): + decoded_bytes = decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + +def sign_string(key, string_to_sign, key_is_base64=True): + if key_is_base64: + key = decode_base64_to_bytes(key) + else: + if isinstance(key, str): + key = key.encode('utf-8') + if isinstance(string_to_sign, str): + string_to_sign = string_to_sign.encode('utf-8') + signed_hmac_sha256 = hmac.HMAC(key, string_to_sign, hashlib.sha256) + digest = signed_hmac_sha256.digest() + encoded_digest = encode_base64(digest) + return encoded_digest diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/authentication.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/authentication.py new file mode 100644 index 00000000000..71d103cac92 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/authentication.py @@ -0,0 +1,188 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import logging +import re +from typing import List, Tuple +from urllib.parse import unquote, urlparse + +try: + from yarl import URL +except ImportError: + pass + +try: + from azure.core.pipeline.transport import AioHttpTransport +except ImportError: + AioHttpTransport = None + +from azure.core.exceptions import ClientAuthenticationError +from azure.core.pipeline.policies import SansIOHTTPPolicy + +from . 
import sign_string + +logger = logging.getLogger(__name__) + + +# wraps a given exception with the desired exception type +def _wrap_exception(ex, desired_type): + msg = "" + if ex.args: + msg = ex.args[0] + return desired_type(msg) + +# This method attempts to emulate the sorting done by the service +def _storage_header_sort(input_headers: List[Tuple[str, str]]) -> List[Tuple[str, str]]: + # Define the custom alphabet for weights + custom_weights = "-!#$%&*.^_|~+\"\'(),/`~0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz{}" + + # Build dict of tuples and list of keys + header_dict = dict() + header_keys = [] + for k, v in input_headers: + header_dict[k] = v + header_keys.append(k) + + # Sort according to custom defined weights + try: + header_keys = sorted(header_keys, key=lambda word: [custom_weights.index(c) for c in word]) + except ValueError: + raise ValueError("Illegal character encountered when sorting headers.") + + # Build list of sorted tuples + sorted_headers = [] + for key in header_keys: + sorted_headers.append((key, header_dict.get(key))) + return sorted_headers + + +class AzureSigningError(ClientAuthenticationError): + """ + Represents a fatal error when attempting to sign a request. + In general, the cause of this exception is user error. For example, the given account key is not valid. + Please visit https://docs.microsoft.com/en-us/azure/storage/common/storage-create-storage-account for more info. + """ + + +# pylint: disable=no-self-use +class SharedKeyCredentialPolicy(SansIOHTTPPolicy): + + def __init__(self, account_name, account_key): + self.account_name = account_name + self.account_key = account_key + super(SharedKeyCredentialPolicy, self).__init__() + + @staticmethod + def _get_headers(request, headers_to_sign): + headers = dict((name.lower(), value) for name, value in request.http_request.headers.items() if value) + if 'content-length' in headers and headers['content-length'] == '0': + del headers['content-length'] + return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n' + + @staticmethod + def _get_verb(request): + return request.http_request.method + '\n' + + def _get_canonicalized_resource(self, request): + uri_path = urlparse(request.http_request.url).path + try: + if isinstance(request.context.transport, AioHttpTransport) or \ + isinstance(getattr(request.context.transport, "_transport", None), AioHttpTransport) or \ + isinstance(getattr(getattr(request.context.transport, "_transport", None), "_transport", None), + AioHttpTransport): + uri_path = URL(uri_path) + return '/' + self.account_name + str(uri_path) + except TypeError: + pass + return '/' + self.account_name + uri_path + + @staticmethod + def _get_canonicalized_headers(request): + string_to_sign = '' + x_ms_headers = [] + for name, value in request.http_request.headers.items(): + if name.startswith('x-ms-'): + x_ms_headers.append((name.lower(), value)) + x_ms_headers = _storage_header_sort(x_ms_headers) + for name, value in x_ms_headers: + if value is not None: + string_to_sign += ''.join([name, ':', value, '\n']) + return string_to_sign + + @staticmethod + def _get_canonicalized_resource_query(request): + sorted_queries = list(request.http_request.query.items()) + sorted_queries.sort() + + string_to_sign = '' + for name, value in sorted_queries: + if value is not None: + string_to_sign += '\n' + name.lower() + ':' + unquote(value) + + return string_to_sign + + def _add_authorization_header(self, request, string_to_sign): + try: + signature = 
sign_string(self.account_key, string_to_sign)
+            auth_string = 'SharedKey ' + self.account_name + ':' + signature
+            request.http_request.headers['Authorization'] = auth_string
+        except Exception as ex:
+            # Wrap any error that occurred as a signing error.
+            # Doing so will clarify/locate the source of the problem.
+            raise _wrap_exception(ex, AzureSigningError)
+
+    def on_request(self, request):
+        string_to_sign = \
+            self._get_verb(request) + \
+            self._get_headers(
+                request,
+                [
+                    'content-encoding', 'content-language', 'content-length',
+                    'content-md5', 'content-type', 'date', 'if-modified-since',
+                    'if-match', 'if-none-match', 'if-unmodified-since', 'byte_range'
+                ]
+            ) + \
+            self._get_canonicalized_headers(request) + \
+            self._get_canonicalized_resource(request) + \
+            self._get_canonicalized_resource_query(request)
+
+        self._add_authorization_header(request, string_to_sign)
+        # logger.debug("String_to_sign=%s", string_to_sign)
+
+
+class StorageHttpChallenge(object):
+    def __init__(self, challenge):
+        """ Parses an HTTP WWW-Authenticate Bearer challenge from the Storage service. """
+        if not challenge:
+            raise ValueError("Challenge cannot be empty")
+
+        self._parameters = {}
+        self.scheme, trimmed_challenge = challenge.strip().split(" ", 1)
+
+        # name=value pairs either comma or space separated with values possibly being
+        # enclosed in quotes
+        for item in re.split('[, ]', trimmed_challenge):
+            comps = item.split("=")
+            if len(comps) == 2:
+                key = comps[0].strip(' "')
+                value = comps[1].strip(' "')
+                if key:
+                    self._parameters[key] = value
+
+        # Extract and verify required parameters
+        self.authorization_uri = self._parameters.get('authorization_uri')
+        if not self.authorization_uri:
+            raise ValueError("Authorization Uri not found")
+
+        self.resource_id = self._parameters.get('resource_id')
+        if not self.resource_id:
+            raise ValueError("Resource id not found")
+
+        uri_path = urlparse(self.authorization_uri).path.lstrip("/")
+        self.tenant_id = uri_path.split("/")[0]
+
+    def get_value(self, key):
+        return self._parameters.get(key)
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client.py
new file mode 100644
index 00000000000..842bce21f80
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client.py
@@ -0,0 +1,462 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# -------------------------------------------------------------------------- +import logging +import uuid +from typing import ( # pylint: disable=unused-import + Any, + Dict, + Optional, + Tuple, + TYPE_CHECKING, + Union, +) + +try: + from urllib.parse import parse_qs, quote +except ImportError: + from urlparse import parse_qs # type: ignore + from urllib2 import quote # type: ignore + +from azure.core.configuration import Configuration +from azure.core.credentials import AzureSasCredential, AzureNamedKeyCredential +from azure.core.exceptions import HttpResponseError +from azure.core.pipeline import Pipeline +from azure.core.pipeline.transport import RequestsTransport, HttpTransport +from azure.core.pipeline.policies import ( + AzureSasCredentialPolicy, + BearerTokenCredentialPolicy, + ContentDecodePolicy, + DistributedTracingPolicy, + HttpLoggingPolicy, + ProxyPolicy, + RedirectPolicy, + UserAgentPolicy, +) + +from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT, SERVICE_HOST_BASE, STORAGE_OAUTH_SCOPE +from .models import LocationMode +from .authentication import SharedKeyCredentialPolicy +from .shared_access_signature import QueryStringConstants +from .request_handlers import serialize_batch_body, _get_batch_request_delimiter +from .policies import ( + ExponentialRetry, + QueueMessagePolicy, + StorageContentValidation, + StorageHeadersPolicy, + StorageHosts, + StorageLoggingPolicy, + StorageRequestHook, + StorageResponseHook, +) +from .._version import VERSION +from .response_handlers import process_storage_error, PartialBatchErrorException + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + +_LOGGER = logging.getLogger(__name__) +_SERVICE_PARAMS = { + "blob": {"primary": "BLOBENDPOINT", "secondary": "BLOBSECONDARYENDPOINT"}, + "queue": {"primary": "QUEUEENDPOINT", "secondary": "QUEUESECONDARYENDPOINT"}, + "file": {"primary": "FILEENDPOINT", "secondary": "FILESECONDARYENDPOINT"}, + "dfs": {"primary": "BLOBENDPOINT", "secondary": "BLOBENDPOINT"}, +} + + +class StorageAccountHostsMixin(object): # pylint: disable=too-many-instance-attributes + def __init__( + self, + parsed_url, # type: Any + service, # type: str + credential=None, # type: Optional[Union[str, Dict[str, str], AzureNamedKeyCredential, AzureSasCredential, "TokenCredential"]] # pylint: disable=line-too-long + **kwargs # type: Any + ): + # type: (...) 
-> None
+        self._location_mode = kwargs.get("_location_mode", LocationMode.PRIMARY)
+        self._hosts = kwargs.get("_hosts")
+        self.scheme = parsed_url.scheme
+
+        if service not in ["blob", "queue", "file-share", "dfs"]:
+            raise ValueError(f"Invalid service: {service}")
+        service_name = service.split('-')[0]
+        account = parsed_url.netloc.split(f".{service_name}.core.")
+
+        self.account_name = account[0] if len(account) > 1 else None
+        if not self.account_name and parsed_url.netloc.startswith("localhost") \
+                or parsed_url.netloc.startswith("127.0.0.1"):
+            self.account_name = parsed_url.path.strip("/")
+
+        self.credential = _format_shared_key_credential(self.account_name, credential)
+        if self.scheme.lower() != "https" and hasattr(self.credential, "get_token"):
+            raise ValueError("Token credential is only supported with HTTPS.")
+
+        secondary_hostname = None
+        if hasattr(self.credential, "account_name"):
+            self.account_name = self.credential.account_name
+            secondary_hostname = f"{self.credential.account_name}-secondary.{service_name}.{SERVICE_HOST_BASE}"
+
+        if not self._hosts:
+            if len(account) > 1:
+                secondary_hostname = parsed_url.netloc.replace(account[0], account[0] + "-secondary")
+            if kwargs.get("secondary_hostname"):
+                secondary_hostname = kwargs["secondary_hostname"]
+            primary_hostname = (parsed_url.netloc + parsed_url.path).rstrip('/')
+            self._hosts = {LocationMode.PRIMARY: primary_hostname, LocationMode.SECONDARY: secondary_hostname}
+
+        self._config, self._pipeline = self._create_pipeline(self.credential, storage_sdk=service, **kwargs)
+
+    def __enter__(self):
+        self._client.__enter__()
+        return self
+
+    def __exit__(self, *args):
+        self._client.__exit__(*args)
+
+    def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when the client is used as a context manager.
+        """
+        self._client.close()
+
+    @property
+    def url(self):
+        """The full endpoint URL to this entity, including SAS token if used.
+
+        This could be either the primary endpoint or the secondary endpoint,
+        depending on the current :func:`location_mode`.
+        """
+        return self._format_url(self._hosts[self._location_mode])
+
+    @property
+    def primary_endpoint(self):
+        """The full primary endpoint URL.
+
+        :type: str
+        """
+        return self._format_url(self._hosts[LocationMode.PRIMARY])
+
+    @property
+    def primary_hostname(self):
+        """The hostname of the primary endpoint.
+
+        :type: str
+        """
+        return self._hosts[LocationMode.PRIMARY]
+
+    @property
+    def secondary_endpoint(self):
+        """The full secondary endpoint URL if configured.
+
+        If not available, a ValueError will be raised. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str
+        :raise ValueError:
+        """
+        if not self._hosts[LocationMode.SECONDARY]:
+            raise ValueError("No secondary host configured.")
+        return self._format_url(self._hosts[LocationMode.SECONDARY])
+
+    @property
+    def secondary_hostname(self):
+        """The hostname of the secondary endpoint.
+
+        If not available this will be None. To explicitly specify a secondary hostname, use the optional
+        `secondary_hostname` keyword argument on instantiation.
+
+        :type: str or None
+        """
+        return self._hosts[LocationMode.SECONDARY]
+
+    @property
+    def location_mode(self):
+        """The location mode that the client is currently using.
+
+        By default this will be "primary". Options include "primary" and "secondary".
+
+        :type: str
+        """
+
+        return self._location_mode
+
+    @location_mode.setter
+    def location_mode(self, value):
+        if self._hosts.get(value):
+            self._location_mode = value
+            self._client._config.url = self.url  # pylint: disable=protected-access
+        else:
+            raise ValueError(f"No host URL for location mode: {value}")
+
+    @property
+    def api_version(self):
+        """The version of the Storage API used for requests.
+
+        :type: str
+        """
+        return self._client._config.version  # pylint: disable=protected-access
+
+    def _format_query_string(self, sas_token, credential, snapshot=None, share_snapshot=None):
+        query_str = "?"
+        if snapshot:
+            query_str += f"snapshot={self.snapshot}&"
+        if share_snapshot:
+            query_str += f"sharesnapshot={self.snapshot}&"
+        if sas_token and isinstance(credential, AzureSasCredential):
+            raise ValueError(
+                "You cannot use AzureSasCredential when the resource URI also contains a Shared Access Signature.")
+        if is_credential_sastoken(credential):
+            query_str += credential.lstrip("?")
+            credential = None
+        elif sas_token:
+            query_str += sas_token
+        return query_str.rstrip("?&"), credential
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, "get_token"):
+            self._credential_policy = BearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {credential}")
+
+        config = kwargs.get("_configuration") or create_configuration(**kwargs)
+        if kwargs.get("_pipeline"):
+            return config, kwargs["_pipeline"]
+        config.transport = kwargs.get("transport")  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            config.transport = RequestsTransport(**kwargs)
+        policies = [
+            QueueMessagePolicy(),
+            config.proxy_policy,
+            config.user_agent_policy,
+            StorageContentValidation(),
+            ContentDecodePolicy(response_encoding="utf-8"),
+            RedirectPolicy(**kwargs),
+            StorageHosts(hosts=self._hosts, **kwargs),
+            config.retry_policy,
+            config.headers_policy,
+            StorageRequestHook(**kwargs),
+            self._credential_policy,
+            config.logging_policy,
+            StorageResponseHook(**kwargs),
+            DistributedTracingPolicy(**kwargs),
+            HttpLoggingPolicy(**kwargs)
+        ]
+        if kwargs.get("_additional_pipeline_policies"):
+            policies = policies + kwargs.get("_additional_pipeline_policies")
+        return config, Pipeline(config.transport, policies=policies)
+
+    def _batch_send(
+        self,
+        *reqs,  # type: HttpRequest
+        **kwargs
+    ):
+        """Given a series of requests, do a Storage batch call.
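+
+        A schematic consumption sketch (``reqs`` stands for pre-built
+        sub-requests from a public batch API layered on this helper;
+        PartialBatchErrorException comes from .response_handlers):
+
+        .. code-block:: python
+
+            try:
+                for part in client._batch_send(*reqs):
+                    assert 200 <= part.status_code < 300
+            except PartialBatchErrorException as error:
+                # With the default raise_on_any_failure=True, every sub-response
+                # is attached so the failing operations can be identified.
+                failed = [p for p in error.parts
+                          if not 200 <= p.status_code < 300]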
+ """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + batch_id = str(uuid.uuid1()) + + request = self._client._client.post( # pylint: disable=protected-access + url=( + f'{self.scheme}://{self.primary_hostname}/' + f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}" + f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}" + ), + headers={ + 'x-ms-version': self.api_version, + "Content-Type": "multipart/mixed; boundary=" + _get_batch_request_delimiter(batch_id, False, False) + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + Pipeline._prepare_multipart_mixed_request(request) # pylint: disable=protected-access + body = serialize_batch_body(request.multipart_mixed_info[0], batch_id) + request.set_bytes_body(body) + + temp = request.multipart_mixed_info + request.multipart_mixed_info = None + pipeline_response = self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + request.multipart_mixed_info = temp + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() + if raise_on_any_failure: + parts = list(response.parts()) + if any(p for p in parts if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts + ) + raise error + return iter(parts) + return parts + except HttpResponseError as error: + process_storage_error(error) + +class TransportWrapper(HttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. 
+ """ + def __init__(self, transport): + self._transport = transport + + def send(self, request, **kwargs): + return self._transport.send(request, **kwargs) + + def open(self): + pass + + def close(self): + pass + + def __enter__(self): + pass + + def __exit__(self, *args): # pylint: disable=arguments-differ + pass + + +def _format_shared_key_credential(account_name, credential): + if isinstance(credential, str): + if not account_name: + raise ValueError("Unable to determine account name for shared key credential.") + credential = {"account_name": account_name, "account_key": credential} + if isinstance(credential, dict): + if "account_name" not in credential: + raise ValueError("Shared key credential missing 'account_name") + if "account_key" not in credential: + raise ValueError("Shared key credential missing 'account_key") + return SharedKeyCredentialPolicy(**credential) + if isinstance(credential, AzureNamedKeyCredential): + return SharedKeyCredentialPolicy(credential.named_key.name, credential.named_key.key) + return credential + + +def parse_connection_str(conn_str, credential, service): + conn_str = conn_str.rstrip(";") + conn_settings = [s.split("=", 1) for s in conn_str.split(";")] + if any(len(tup) != 2 for tup in conn_settings): + raise ValueError("Connection string is either blank or malformed.") + conn_settings = dict((key.upper(), val) for key, val in conn_settings) + endpoints = _SERVICE_PARAMS[service] + primary = None + secondary = None + if not credential: + try: + credential = {"account_name": conn_settings["ACCOUNTNAME"], "account_key": conn_settings["ACCOUNTKEY"]} + except KeyError: + credential = conn_settings.get("SHAREDACCESSSIGNATURE") + if endpoints["primary"] in conn_settings: + primary = conn_settings[endpoints["primary"]] + if endpoints["secondary"] in conn_settings: + secondary = conn_settings[endpoints["secondary"]] + else: + if endpoints["secondary"] in conn_settings: + raise ValueError("Connection string specifies only secondary endpoint.") + try: + primary =( + f"{conn_settings['DEFAULTENDPOINTSPROTOCOL']}://" + f"{conn_settings['ACCOUNTNAME']}.{service}.{conn_settings['ENDPOINTSUFFIX']}" + ) + secondary = ( + f"{conn_settings['ACCOUNTNAME']}-secondary." + f"{service}.{conn_settings['ENDPOINTSUFFIX']}" + ) + except KeyError: + pass + + if not primary: + try: + primary = ( + f"https://{conn_settings['ACCOUNTNAME']}." 
+ f"{service}.{conn_settings.get('ENDPOINTSUFFIX', SERVICE_HOST_BASE)}" + ) + except KeyError: + raise ValueError("Connection string missing required connection details.") + if service == "dfs": + primary = primary.replace(".blob.", ".dfs.") + if secondary: + secondary = secondary.replace(".blob.", ".dfs.") + return primary, secondary, credential + + +def create_configuration(**kwargs): + # type: (**Any) -> Configuration + config = Configuration(**kwargs) + config.headers_policy = StorageHeadersPolicy(**kwargs) + config.user_agent_policy = UserAgentPolicy( + sdk_moniker=f"storage-{kwargs.pop('storage_sdk')}/{VERSION}", **kwargs) + config.retry_policy = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + config.logging_policy = StorageLoggingPolicy(**kwargs) + config.proxy_policy = ProxyPolicy(**kwargs) + + # Storage settings + config.max_single_put_size = kwargs.get("max_single_put_size", 64 * 1024 * 1024) + config.copy_polling_interval = 15 + + # Block blob uploads + config.max_block_size = kwargs.get("max_block_size", 4 * 1024 * 1024) + config.min_large_block_upload_threshold = kwargs.get("min_large_block_upload_threshold", 4 * 1024 * 1024 + 1) + config.use_byte_buffer = kwargs.get("use_byte_buffer", False) + + # Page blob uploads + config.max_page_size = kwargs.get("max_page_size", 4 * 1024 * 1024) + + # Datalake file uploads + config.min_large_chunk_upload_threshold = kwargs.get("min_large_chunk_upload_threshold", 100 * 1024 * 1024 + 1) + + # Blob downloads + config.max_single_get_size = kwargs.get("max_single_get_size", 32 * 1024 * 1024) + config.max_chunk_get_size = kwargs.get("max_chunk_get_size", 4 * 1024 * 1024) + + # File uploads + config.max_range_size = kwargs.get("max_range_size", 4 * 1024 * 1024) + return config + + +def parse_query(query_str): + sas_values = QueryStringConstants.to_list() + parsed_query = {k: v[0] for k, v in parse_qs(query_str).items()} + sas_params = [f"{k}={quote(v, safe='')}" for k, v in parsed_query.items() if k in sas_values] + sas_token = None + if sas_params: + sas_token = "&".join(sas_params) + + snapshot = parsed_query.get("snapshot") or parsed_query.get("sharesnapshot") + return snapshot, sas_token + + +def is_credential_sastoken(credential): + if not credential or not isinstance(credential, str): + return False + + sas_values = QueryStringConstants.to_list() + parsed_query = parse_qs(credential.lstrip("?")) + if parsed_query and all([k in sas_values for k in parsed_query.keys()]): + return True + return False diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client_async.py new file mode 100644 index 00000000000..f6dc3a9c747 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/base_client_async.py @@ -0,0 +1,189 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# --------------------------------------------------------------------------
+
+from typing import (  # pylint: disable=unused-import
+    Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
+    TYPE_CHECKING
+)
+import logging
+
+from azure.core.credentials import AzureSasCredential
+from azure.core.pipeline import AsyncPipeline
+from azure.core.async_paging import AsyncList
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline.policies import (
+    AsyncBearerTokenCredentialPolicy,
+    AsyncRedirectPolicy,
+    AzureSasCredentialPolicy,
+    ContentDecodePolicy,
+    DistributedTracingPolicy,
+    HttpLoggingPolicy,
+)
+from azure.core.pipeline.transport import AsyncHttpTransport
+
+from .constants import CONNECTION_TIMEOUT, READ_TIMEOUT, STORAGE_OAUTH_SCOPE
+from .authentication import SharedKeyCredentialPolicy
+from .base_client import create_configuration
+from .policies import (
+    QueueMessagePolicy,
+    StorageContentValidation,
+    StorageHeadersPolicy,
+    StorageHosts,
+    StorageRequestHook,
+)
+from .policies_async import AsyncStorageResponseHook
+
+from .response_handlers import process_storage_error, PartialBatchErrorException
+
+if TYPE_CHECKING:
+    from azure.core.pipeline import Pipeline
+    from azure.core.pipeline.transport import HttpRequest
+    from azure.core.configuration import Configuration
+_LOGGER = logging.getLogger(__name__)
+
+
+class AsyncStorageAccountHostsMixin(object):
+
+    def __enter__(self):
+        raise TypeError("Async client only supports 'async with'.")
+
+    def __exit__(self, *args):
+        pass
+
+    async def __aenter__(self):
+        await self._client.__aenter__()
+        return self
+
+    async def __aexit__(self, *args):
+        await self._client.__aexit__(*args)
+
+    async def close(self):
+        """ This method is to close the sockets opened by the client.
+        It need not be used when the client is used as a context manager.
+        """
+        await self._client.close()
+
+    def _create_pipeline(self, credential, **kwargs):
+        # type: (Any, **Any) -> Tuple[Configuration, Pipeline]
+        self._credential_policy = None
+        if hasattr(credential, 'get_token'):
+            self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)
+        elif isinstance(credential, SharedKeyCredentialPolicy):
+            self._credential_policy = credential
+        elif isinstance(credential, AzureSasCredential):
+            self._credential_policy = AzureSasCredentialPolicy(credential)
+        elif credential is not None:
+            raise TypeError(f"Unsupported credential: {credential}")
+        config = kwargs.get('_configuration') or create_configuration(**kwargs)
+        if kwargs.get('_pipeline'):
+            return config, kwargs['_pipeline']
+        config.transport = kwargs.get('transport')  # type: ignore
+        kwargs.setdefault("connection_timeout", CONNECTION_TIMEOUT)
+        kwargs.setdefault("read_timeout", READ_TIMEOUT)
+        if not config.transport:
+            try:
+                from azure.core.pipeline.transport import AioHttpTransport
+            except ImportError:
+                raise ImportError("Unable to create async transport. Please check aiohttp is installed.")
Please check aiohttp is installed.") + config.transport = AioHttpTransport(**kwargs) + policies = [ + QueueMessagePolicy(), + config.headers_policy, + config.proxy_policy, + config.user_agent_policy, + StorageContentValidation(), + StorageRequestHook(**kwargs), + self._credential_policy, + ContentDecodePolicy(response_encoding="utf-8"), + AsyncRedirectPolicy(**kwargs), + StorageHosts(hosts=self._hosts, **kwargs), # type: ignore + config.retry_policy, + config.logging_policy, + AsyncStorageResponseHook(**kwargs), + DistributedTracingPolicy(**kwargs), + HttpLoggingPolicy(**kwargs), + ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") + return config, AsyncPipeline(config.transport, policies=policies) + + async def _batch_send( + self, + *reqs, # type: HttpRequest + **kwargs + ): + """Given a series of request, do a Storage batch call. + """ + # Pop it here, so requests doesn't feel bad about additional kwarg + raise_on_any_failure = kwargs.pop("raise_on_any_failure", True) + request = self._client._client.post( # pylint: disable=protected-access + url=( + f'{self.scheme}://{self.primary_hostname}/' + f"{kwargs.pop('path', '')}?{kwargs.pop('restype', '')}" + f"comp=batch{kwargs.pop('sas', '')}{kwargs.pop('timeout', '')}" + ), + headers={ + 'x-ms-version': self.api_version + } + ) + + policies = [StorageHeadersPolicy()] + if self._credential_policy: + policies.append(self._credential_policy) + + request.set_multipart_mixed( + *reqs, + policies=policies, + enforce_https=False + ) + + pipeline_response = await self._pipeline.run( + request, **kwargs + ) + response = pipeline_response.http_response + + try: + if response.status_code not in [202]: + raise HttpResponseError(response=response) + parts = response.parts() # Return an AsyncIterator + if raise_on_any_failure: + parts_list = [] + async for part in parts: + parts_list.append(part) + if any(p for p in parts_list if not 200 <= p.status_code < 300): + error = PartialBatchErrorException( + message="There is a partial failure in the batch operation.", + response=response, parts=parts_list + ) + raise error + return AsyncList(parts_list) + return parts + except HttpResponseError as error: + process_storage_error(error) + + +class AsyncTransportWrapper(AsyncHttpTransport): + """Wrapper class that ensures that an inner client created + by a `get_client` method does not close the outer transport for the parent + when used in a context manager. + """ + def __init__(self, async_transport): + self._transport = async_transport + + async def send(self, request, **kwargs): + return await self._transport.send(request, **kwargs) + + async def open(self): + pass + + async def close(self): + pass + + async def __aenter__(self): + pass + + async def __aexit__(self, *args): # pylint: disable=arguments-differ + pass diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/constants.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/constants.py new file mode 100644 index 00000000000..0b4b029a2d1 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/constants.py @@ -0,0 +1,19 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +from .._serialize import _SUPPORTED_API_VERSIONS + + +X_MS_VERSION = _SUPPORTED_API_VERSIONS[-1] + +# Default socket timeouts, in seconds +CONNECTION_TIMEOUT = 20 +READ_TIMEOUT = 60 + +DEFAULT_OAUTH_SCOPE = "/.default" +STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" + +SERVICE_HOST_BASE = 'core.windows.net' diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/models.py new file mode 100644 index 00000000000..4f15fd963e0 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/models.py @@ -0,0 +1,486 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-instance-attributes +from enum import Enum + +from azure.core import CaseInsensitiveEnumMeta + + +def get_enum_value(value): + if value is None or value in ["None", ""]: + return None + try: + return value.value + except AttributeError: + return value + + +class StorageErrorCode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + + # Generic storage values + ACCOUNT_ALREADY_EXISTS = "AccountAlreadyExists" + ACCOUNT_BEING_CREATED = "AccountBeingCreated" + ACCOUNT_IS_DISABLED = "AccountIsDisabled" + AUTHENTICATION_FAILED = "AuthenticationFailed" + AUTHORIZATION_FAILURE = "AuthorizationFailure" + NO_AUTHENTICATION_INFORMATION = "NoAuthenticationInformation" + CONDITION_HEADERS_NOT_SUPPORTED = "ConditionHeadersNotSupported" + CONDITION_NOT_MET = "ConditionNotMet" + EMPTY_METADATA_KEY = "EmptyMetadataKey" + INSUFFICIENT_ACCOUNT_PERMISSIONS = "InsufficientAccountPermissions" + INTERNAL_ERROR = "InternalError" + INVALID_AUTHENTICATION_INFO = "InvalidAuthenticationInfo" + INVALID_HEADER_VALUE = "InvalidHeaderValue" + INVALID_HTTP_VERB = "InvalidHttpVerb" + INVALID_INPUT = "InvalidInput" + INVALID_MD5 = "InvalidMd5" + INVALID_METADATA = "InvalidMetadata" + INVALID_QUERY_PARAMETER_VALUE = "InvalidQueryParameterValue" + INVALID_RANGE = "InvalidRange" + INVALID_RESOURCE_NAME = "InvalidResourceName" + INVALID_URI = "InvalidUri" + INVALID_XML_DOCUMENT = "InvalidXmlDocument" + INVALID_XML_NODE_VALUE = "InvalidXmlNodeValue" + MD5_MISMATCH = "Md5Mismatch" + METADATA_TOO_LARGE = "MetadataTooLarge" + MISSING_CONTENT_LENGTH_HEADER = "MissingContentLengthHeader" + MISSING_REQUIRED_QUERY_PARAMETER = "MissingRequiredQueryParameter" + MISSING_REQUIRED_HEADER = "MissingRequiredHeader" + MISSING_REQUIRED_XML_NODE = "MissingRequiredXmlNode" + MULTIPLE_CONDITION_HEADERS_NOT_SUPPORTED = "MultipleConditionHeadersNotSupported" + OPERATION_TIMED_OUT = "OperationTimedOut" + OUT_OF_RANGE_INPUT = "OutOfRangeInput" + OUT_OF_RANGE_QUERY_PARAMETER_VALUE = "OutOfRangeQueryParameterValue" + REQUEST_BODY_TOO_LARGE = "RequestBodyTooLarge" + RESOURCE_TYPE_MISMATCH = "ResourceTypeMismatch" + REQUEST_URL_FAILED_TO_PARSE = "RequestUrlFailedToParse" + RESOURCE_ALREADY_EXISTS = "ResourceAlreadyExists" + RESOURCE_NOT_FOUND = "ResourceNotFound" + SERVER_BUSY = "ServerBusy" + UNSUPPORTED_HEADER = "UnsupportedHeader" + UNSUPPORTED_XML_NODE = "UnsupportedXmlNode" + UNSUPPORTED_QUERY_PARAMETER = "UnsupportedQueryParameter" + UNSUPPORTED_HTTP_VERB 
= "UnsupportedHttpVerb" + + # Blob values + APPEND_POSITION_CONDITION_NOT_MET = "AppendPositionConditionNotMet" + BLOB_ALREADY_EXISTS = "BlobAlreadyExists" + BLOB_NOT_FOUND = "BlobNotFound" + BLOB_OVERWRITTEN = "BlobOverwritten" + BLOB_TIER_INADEQUATE_FOR_CONTENT_LENGTH = "BlobTierInadequateForContentLength" + BLOCK_COUNT_EXCEEDS_LIMIT = "BlockCountExceedsLimit" + BLOCK_LIST_TOO_LONG = "BlockListTooLong" + CANNOT_CHANGE_TO_LOWER_TIER = "CannotChangeToLowerTier" + CANNOT_VERIFY_COPY_SOURCE = "CannotVerifyCopySource" + CONTAINER_ALREADY_EXISTS = "ContainerAlreadyExists" + CONTAINER_BEING_DELETED = "ContainerBeingDeleted" + CONTAINER_DISABLED = "ContainerDisabled" + CONTAINER_NOT_FOUND = "ContainerNotFound" + CONTENT_LENGTH_LARGER_THAN_TIER_LIMIT = "ContentLengthLargerThanTierLimit" + COPY_ACROSS_ACCOUNTS_NOT_SUPPORTED = "CopyAcrossAccountsNotSupported" + COPY_ID_MISMATCH = "CopyIdMismatch" + FEATURE_VERSION_MISMATCH = "FeatureVersionMismatch" + INCREMENTAL_COPY_BLOB_MISMATCH = "IncrementalCopyBlobMismatch" + INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + #: Deprecated: Please use INCREMENTAL_COPY_OF_EARLIER_VERSION_SNAPSHOT_NOT_ALLOWED instead. + INCREMENTAL_COPY_OF_ERALIER_VERSION_SNAPSHOT_NOT_ALLOWED = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed" + INCREMENTAL_COPY_SOURCE_MUST_BE_SNAPSHOT = "IncrementalCopySourceMustBeSnapshot" + INFINITE_LEASE_DURATION_REQUIRED = "InfiniteLeaseDurationRequired" + INVALID_BLOB_OR_BLOCK = "InvalidBlobOrBlock" + INVALID_BLOB_TIER = "InvalidBlobTier" + INVALID_BLOB_TYPE = "InvalidBlobType" + INVALID_BLOCK_ID = "InvalidBlockId" + INVALID_BLOCK_LIST = "InvalidBlockList" + INVALID_OPERATION = "InvalidOperation" + INVALID_PAGE_RANGE = "InvalidPageRange" + INVALID_SOURCE_BLOB_TYPE = "InvalidSourceBlobType" + INVALID_SOURCE_BLOB_URL = "InvalidSourceBlobUrl" + INVALID_VERSION_FOR_PAGE_BLOB_OPERATION = "InvalidVersionForPageBlobOperation" + LEASE_ALREADY_PRESENT = "LeaseAlreadyPresent" + LEASE_ALREADY_BROKEN = "LeaseAlreadyBroken" + LEASE_ID_MISMATCH_WITH_BLOB_OPERATION = "LeaseIdMismatchWithBlobOperation" + LEASE_ID_MISMATCH_WITH_CONTAINER_OPERATION = "LeaseIdMismatchWithContainerOperation" + LEASE_ID_MISMATCH_WITH_LEASE_OPERATION = "LeaseIdMismatchWithLeaseOperation" + LEASE_ID_MISSING = "LeaseIdMissing" + LEASE_IS_BREAKING_AND_CANNOT_BE_ACQUIRED = "LeaseIsBreakingAndCannotBeAcquired" + LEASE_IS_BREAKING_AND_CANNOT_BE_CHANGED = "LeaseIsBreakingAndCannotBeChanged" + LEASE_IS_BROKEN_AND_CANNOT_BE_RENEWED = "LeaseIsBrokenAndCannotBeRenewed" + LEASE_LOST = "LeaseLost" + LEASE_NOT_PRESENT_WITH_BLOB_OPERATION = "LeaseNotPresentWithBlobOperation" + LEASE_NOT_PRESENT_WITH_CONTAINER_OPERATION = "LeaseNotPresentWithContainerOperation" + LEASE_NOT_PRESENT_WITH_LEASE_OPERATION = "LeaseNotPresentWithLeaseOperation" + MAX_BLOB_SIZE_CONDITION_NOT_MET = "MaxBlobSizeConditionNotMet" + NO_PENDING_COPY_OPERATION = "NoPendingCopyOperation" + OPERATION_NOT_ALLOWED_ON_INCREMENTAL_COPY_BLOB = "OperationNotAllowedOnIncrementalCopyBlob" + PENDING_COPY_OPERATION = "PendingCopyOperation" + PREVIOUS_SNAPSHOT_CANNOT_BE_NEWER = "PreviousSnapshotCannotBeNewer" + PREVIOUS_SNAPSHOT_NOT_FOUND = "PreviousSnapshotNotFound" + PREVIOUS_SNAPSHOT_OPERATION_NOT_SUPPORTED = "PreviousSnapshotOperationNotSupported" + SEQUENCE_NUMBER_CONDITION_NOT_MET = "SequenceNumberConditionNotMet" + SEQUENCE_NUMBER_INCREMENT_TOO_LARGE = "SequenceNumberIncrementTooLarge" + SNAPSHOT_COUNT_EXCEEDED = "SnapshotCountExceeded" + 
SNAPSHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" + #: Deprecated: Please use SNAPSHOT_OPERATION_RATE_EXCEEDED instead. + SNAPHOT_OPERATION_RATE_EXCEEDED = "SnapshotOperationRateExceeded" + SNAPSHOTS_PRESENT = "SnapshotsPresent" + SOURCE_CONDITION_NOT_MET = "SourceConditionNotMet" + SYSTEM_IN_USE = "SystemInUse" + TARGET_CONDITION_NOT_MET = "TargetConditionNotMet" + UNAUTHORIZED_BLOB_OVERWRITE = "UnauthorizedBlobOverwrite" + BLOB_BEING_REHYDRATED = "BlobBeingRehydrated" + BLOB_ARCHIVED = "BlobArchived" + BLOB_NOT_ARCHIVED = "BlobNotArchived" + + # Queue values + INVALID_MARKER = "InvalidMarker" + MESSAGE_NOT_FOUND = "MessageNotFound" + MESSAGE_TOO_LARGE = "MessageTooLarge" + POP_RECEIPT_MISMATCH = "PopReceiptMismatch" + QUEUE_ALREADY_EXISTS = "QueueAlreadyExists" + QUEUE_BEING_DELETED = "QueueBeingDeleted" + QUEUE_DISABLED = "QueueDisabled" + QUEUE_NOT_EMPTY = "QueueNotEmpty" + QUEUE_NOT_FOUND = "QueueNotFound" + + # File values + CANNOT_DELETE_FILE_OR_DIRECTORY = "CannotDeleteFileOrDirectory" + CLIENT_CACHE_FLUSH_DELAY = "ClientCacheFlushDelay" + DELETE_PENDING = "DeletePending" + DIRECTORY_NOT_EMPTY = "DirectoryNotEmpty" + FILE_LOCK_CONFLICT = "FileLockConflict" + INVALID_FILE_OR_DIRECTORY_PATH_NAME = "InvalidFileOrDirectoryPathName" + PARENT_NOT_FOUND = "ParentNotFound" + READ_ONLY_ATTRIBUTE = "ReadOnlyAttribute" + SHARE_ALREADY_EXISTS = "ShareAlreadyExists" + SHARE_BEING_DELETED = "ShareBeingDeleted" + SHARE_DISABLED = "ShareDisabled" + SHARE_NOT_FOUND = "ShareNotFound" + SHARING_VIOLATION = "SharingViolation" + SHARE_SNAPSHOT_IN_PROGRESS = "ShareSnapshotInProgress" + SHARE_SNAPSHOT_COUNT_EXCEEDED = "ShareSnapshotCountExceeded" + SHARE_SNAPSHOT_OPERATION_NOT_SUPPORTED = "ShareSnapshotOperationNotSupported" + SHARE_HAS_SNAPSHOTS = "ShareHasSnapshots" + CONTAINER_QUOTA_DOWNGRADE_NOT_ALLOWED = "ContainerQuotaDowngradeNotAllowed" + + # DataLake values + CONTENT_LENGTH_MUST_BE_ZERO = 'ContentLengthMustBeZero' + PATH_ALREADY_EXISTS = 'PathAlreadyExists' + INVALID_FLUSH_POSITION = 'InvalidFlushPosition' + INVALID_PROPERTY_NAME = 'InvalidPropertyName' + INVALID_SOURCE_URI = 'InvalidSourceUri' + UNSUPPORTED_REST_VERSION = 'UnsupportedRestVersion' + FILE_SYSTEM_NOT_FOUND = 'FilesystemNotFound' + PATH_NOT_FOUND = 'PathNotFound' + RENAME_DESTINATION_PARENT_PATH_NOT_FOUND = 'RenameDestinationParentPathNotFound' + SOURCE_PATH_NOT_FOUND = 'SourcePathNotFound' + DESTINATION_PATH_IS_BEING_DELETED = 'DestinationPathIsBeingDeleted' + FILE_SYSTEM_ALREADY_EXISTS = 'FilesystemAlreadyExists' + FILE_SYSTEM_BEING_DELETED = 'FilesystemBeingDeleted' + INVALID_DESTINATION_PATH = 'InvalidDestinationPath' + INVALID_RENAME_SOURCE_PATH = 'InvalidRenameSourcePath' + INVALID_SOURCE_OR_DESTINATION_RESOURCE_TYPE = 'InvalidSourceOrDestinationResourceType' + LEASE_IS_ALREADY_BROKEN = 'LeaseIsAlreadyBroken' + LEASE_NAME_MISMATCH = 'LeaseNameMismatch' + PATH_CONFLICT = 'PathConflict' + SOURCE_PATH_IS_BEING_DELETED = 'SourcePathIsBeingDeleted' + + +class DictMixin(object): + + def __setitem__(self, key, item): + self.__dict__[key] = item + + def __getitem__(self, key): + return self.__dict__[key] + + def __repr__(self): + return str(self) + + def __len__(self): + return len(self.keys()) + + def __delitem__(self, key): + self.__dict__[key] = None + + def __eq__(self, other): + """Compare objects by comparing all attributes.""" + if isinstance(other, self.__class__): + return self.__dict__ == other.__dict__ + return False + + def __ne__(self, other): + """Compare objects by comparing all attributes.""" + return 
not self.__eq__(other)
+
+    def __str__(self):
+        return str({k: v for k, v in self.__dict__.items() if not k.startswith('_')})
+
+    def has_key(self, k):
+        return k in self.__dict__
+
+    def update(self, *args, **kwargs):
+        return self.__dict__.update(*args, **kwargs)
+
+    def keys(self):
+        return [k for k in self.__dict__ if not k.startswith('_')]
+
+    def values(self):
+        return [v for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def items(self):
+        return [(k, v) for k, v in self.__dict__.items() if not k.startswith('_')]
+
+    def get(self, key, default=None):
+        if key in self.__dict__:
+            return self.__dict__[key]
+        return default
+
+
+class LocationMode(object):
+    """
+    Specifies the location the request should be sent to. This mode only applies
+    for RA-GRS accounts which allow secondary read access. All other account types
+    must use PRIMARY.
+    """
+
+    PRIMARY = 'primary'  #: Requests should be sent to the primary location.
+    SECONDARY = 'secondary'  #: Requests should be sent to the secondary location, if possible.
+
+
+class ResourceTypes(object):
+    """
+    Specifies the resource types that are accessible with the account SAS.
+
+    :param bool service:
+        Access to service-level APIs (e.g., Get/Set Service Properties,
+        Get Service Stats, List Containers/Queues/Shares)
+    :param bool container:
+        Access to container-level APIs (e.g., Create/Delete Container,
+        Create/Delete Queue, Create/Delete Share,
+        List Blobs/Files and Directories)
+    :param bool object:
+        Access to object-level APIs for blobs, queue messages, and
+        files (e.g. Put Blob, Query Entity, Get Messages, Create File, etc.)
+    """
+
+    def __init__(self, service=False, container=False, object=False):  # pylint: disable=redefined-builtin
+        self.service = service
+        self.container = container
+        self.object = object
+        self._str = (('s' if self.service else '') +
+                     ('c' if self.container else '') +
+                     ('o' if self.object else ''))
+
+    def __str__(self):
+        return self._str
+
+    @classmethod
+    def from_string(cls, string):
+        """Create a ResourceTypes from a string.
+
+        To specify service, container, or object you need only to
+        include the first letter of the word in the string. E.g. for service and container,
+        you would provide a string "sc".
+
+        :param str string: Specify service, container, or object in
+            the string with the first letter of the word.
+        :return: A ResourceTypes object
+        :rtype: ~azure.storage.fileshare.ResourceTypes
+        """
+        res_service = 's' in string
+        res_container = 'c' in string
+        res_object = 'o' in string
+
+        parsed = cls(res_service, res_container, res_object)
+        parsed._str = string  # pylint: disable = protected-access
+        return parsed
+
+
+class AccountSasPermissions(object):
+    """
+    :class:`~AccountSasPermissions` class to be used with generate_account_sas
+    function and for the AccessPolicies used with set_*_acl. There are two types of
+    SAS which may be used to grant resource access. One is to grant access to a
+    specific resource (resource-specific). Another is to grant access to the
+    entire service for a specific account and allow certain operations based on
+    permissions found here.
+
+    :param bool read:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits read permissions to the specified resource type.
+    :param bool write:
+        Valid for all signed resources types (Service, Container, and Object).
+        Permits write permissions to the specified resource type.
+    :param bool delete:
+        Valid for Container and Object resource types, except for queue messages.
+ :param bool delete_previous_version: + Delete the previous blob version for the versioning enabled storage account. + :param bool list: + Valid for Service and Container resource types only. + :param bool add: + Valid for the following Object resource types only: queue messages, and append blobs. + :param bool create: + Valid for the following Object resource types only: blobs and files. + Users can create new blobs or files, but may not overwrite existing + blobs or files. + :param bool update: + Valid for the following Object resource types only: queue messages. + :param bool process: + Valid for the following Object resource type only: queue messages. + :keyword bool tag: + To enable set or get tags on the blobs in the container. + :keyword bool filter_by_tags: + To enable get blobs by tags, this should be used together with list permission. + :keyword bool set_immutability_policy: + To enable operations related to set/delete immutability policy. + To get immutability policy, you just need read permission. + :keyword bool permanent_delete: + To enable permanent delete on the blob is permitted. + Valid for Object resource type of Blob only. + """ + def __init__(self, read=False, write=False, delete=False, + list=False, # pylint: disable=redefined-builtin + add=False, create=False, update=False, process=False, delete_previous_version=False, **kwargs): + self.read = read + self.write = write + self.delete = delete + self.delete_previous_version = delete_previous_version + self.permanent_delete = kwargs.pop('permanent_delete', False) + self.list = list + self.add = add + self.create = create + self.update = update + self.process = process + self.tag = kwargs.pop('tag', False) + self.filter_by_tags = kwargs.pop('filter_by_tags', False) + self.set_immutability_policy = kwargs.pop('set_immutability_policy', False) + self._str = (('r' if self.read else '') + + ('w' if self.write else '') + + ('d' if self.delete else '') + + ('x' if self.delete_previous_version else '') + + ('y' if self.permanent_delete else '') + + ('l' if self.list else '') + + ('a' if self.add else '') + + ('c' if self.create else '') + + ('u' if self.update else '') + + ('p' if self.process else '') + + ('f' if self.filter_by_tags else '') + + ('t' if self.tag else '') + + ('i' if self.set_immutability_policy else '') + ) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, permission): + """Create AccountSasPermissions from a string. + + To specify read, write, delete, etc. permissions you need only to + include the first letter of the word in the string. E.g. for read and write + permissions you would provide a string "rw". + + :param str permission: Specify permissions in + the string with the first letter of the word. 
+ :return: An AccountSasPermissions object + :rtype: ~azure.storage.fileshare.AccountSasPermissions + """ + p_read = 'r' in permission + p_write = 'w' in permission + p_delete = 'd' in permission + p_delete_previous_version = 'x' in permission + p_permanent_delete = 'y' in permission + p_list = 'l' in permission + p_add = 'a' in permission + p_create = 'c' in permission + p_update = 'u' in permission + p_process = 'p' in permission + p_tag = 't' in permission + p_filter_by_tags = 'f' in permission + p_set_immutability_policy = 'i' in permission + parsed = cls(read=p_read, write=p_write, delete=p_delete, delete_previous_version=p_delete_previous_version, + list=p_list, add=p_add, create=p_create, update=p_update, process=p_process, tag=p_tag, + filter_by_tags=p_filter_by_tags, set_immutability_policy=p_set_immutability_policy, + permanent_delete=p_permanent_delete) + + return parsed + + +class Services(object): + """Specifies the services accessible with the account SAS. + + :param bool blob: + Access for the `~azure.storage.blob.BlobServiceClient` + :param bool queue: + Access for the `~azure.storage.queue.QueueServiceClient` + :param bool fileshare: + Access for the `~azure.storage.fileshare.ShareServiceClient` + """ + + def __init__(self, blob=False, queue=False, fileshare=False): + self.blob = blob + self.queue = queue + self.fileshare = fileshare + self._str = (('b' if self.blob else '') + + ('q' if self.queue else '') + + ('f' if self.fileshare else '')) + + def __str__(self): + return self._str + + @classmethod + def from_string(cls, string): + """Create Services from a string. + + To specify blob, queue, or file you need only to + include the first letter of the word in the string. E.g. for blob and queue + you would provide a string "bq". + + :param str string: Specify blob, queue, or file in + in the string with the first letter of the word. + :return: A Services object + :rtype: ~azure.storage.fileshare.Services + """ + res_blob = 'b' in string + res_queue = 'q' in string + res_file = 'f' in string + + parsed = cls(res_blob, res_queue, res_file) + parsed._str = string # pylint: disable = protected-access + return parsed + + +class UserDelegationKey(object): + """ + Represents a user delegation key, provided to the user by Azure Storage + based on their Azure Active Directory access token. + + The fields are saved as simple strings since the user does not have to interact with this object; + to generate an identify SAS, the user can simply pass it to the right API. + + :ivar str signed_oid: + Object ID of this token. + :ivar str signed_tid: + Tenant ID of the tenant that issued this token. + :ivar str signed_start: + The datetime this token becomes valid. + :ivar str signed_expiry: + The datetime this token expires. + :ivar str signed_service: + What service this key is valid for. + :ivar str signed_version: + The version identifier of the REST service that created this token. + :ivar str value: + The user delegation key. 
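+
+ For instance (outside this vendored package, as an illustration only), the blob package's generate_blob_sas helper accepts the whole object through its user_delegation_key parameter in place of an account key.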
+ """ + def __init__(self): + self.signed_oid = None + self.signed_tid = None + self.signed_start = None + self.signed_expiry = None + self.signed_service = None + self.signed_version = None + self.value = None diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/parser.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/parser.py new file mode 100644 index 00000000000..a4f9da94cc2 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/parser.py @@ -0,0 +1,52 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import sys +from datetime import datetime, timezone + +EPOCH_AS_FILETIME = 116444736000000000 # January 1, 1970 as MS filetime +HUNDREDS_OF_NANOSECONDS = 10000000 + +if sys.version_info < (3,): + def _str(value): + if isinstance(value, unicode): # pylint: disable=undefined-variable + return value.encode('utf-8') + + return str(value) +else: + _str = str + + +def _to_utc_datetime(value): + return value.strftime('%Y-%m-%dT%H:%M:%SZ') + +def _rfc_1123_to_datetime(rfc_1123: str) -> datetime: + """Converts an RFC 1123 date string to a UTC datetime. + """ + if not rfc_1123: + return None + + return datetime.strptime(rfc_1123, "%a, %d %b %Y %H:%M:%S %Z") + +def _filetime_to_datetime(filetime: str) -> datetime: + """Converts an MS filetime string to a UTC datetime. "0" indicates None. + If parsing MS Filetime fails, tries RFC 1123 as backup. + """ + if not filetime: + return None + + # Try to convert to MS Filetime + try: + filetime = int(filetime) + if filetime == 0: + return None + + return datetime.fromtimestamp((filetime - EPOCH_AS_FILETIME) / HUNDREDS_OF_NANOSECONDS, tz=timezone.utc) + except ValueError: + pass + + # Try RFC 1123 as backup + return _rfc_1123_to_datetime(filetime) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies.py new file mode 100644 index 00000000000..e8338782d19 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies.py @@ -0,0 +1,660 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import base64 +import hashlib +import re +import random +from time import time +from io import SEEK_SET, UnsupportedOperation +import logging +import uuid +from typing import Any, TYPE_CHECKING +from wsgiref.handlers import format_date_time +try: + from urllib.parse import ( + urlparse, + parse_qsl, + urlunparse, + urlencode, + ) +except ImportError: + from urllib import urlencode # type: ignore + from urlparse import ( # type: ignore + urlparse, + parse_qsl, + urlunparse, + ) + +from azure.core.pipeline.policies import ( + BearerTokenCredentialPolicy, + HeadersPolicy, + HTTPPolicy, + NetworkTraceLoggingPolicy, + RequestHistory, + SansIOHTTPPolicy, +) +from azure.core.exceptions import AzureError, ServiceRequestError, ServiceResponseError + +from .authentication import StorageHttpChallenge +from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE +from .models import LocationMode + +try: + _unicode_type = unicode # type: ignore +except NameError: + _unicode_type = str + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +def encode_base64(data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + +def is_exhausted(settings): + """Are we out of retries?""" + retry_counts = (settings['total'], settings['connect'], settings['read'], settings['status']) + retry_counts = list(filter(None, retry_counts)) + if not retry_counts: + return False + return min(retry_counts) < 0 + + +def retry_hook(settings, **kwargs): + if settings['hook']: + settings['hook'](retry_count=settings['count'] - 1, location_mode=settings['mode'], **kwargs) + + +def is_retry(response, mode): # pylint: disable=too-many-return-statements + """Is this method/status code retryable? (Based on allowlists and control + variables such as the number of total retries to allow, whether to + respect the Retry-After header, whether this header is present, and + whether the returned status code is on the list of status codes to + be retried upon on the presence of the aforementioned header) + """ + status = response.http_response.status_code + if 300 <= status < 500: + # An exception occurred, but in most cases it was expected. Examples could + # include a 309 Conflict or 412 Precondition Failed. + if status == 404 and mode == LocationMode.SECONDARY: + # Response code 404 should be retried if secondary was used. + return True + if status == 408: + # Response code 408 is a timeout and should be retried. + return True + return False + if status >= 500: + # Response codes above 500 with the exception of 501 Not Implemented and + # 505 Version Not Supported indicate a server issue and should be retried. 
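+ # For example: 500, 502, 503 and 504 responses are retried here, while 501 and 505 are not.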
+ if status in [501, 505]: + return False + return True + # retry if invalid content md5 + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = response.http_request.headers.get('content-md5', None) or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + return True + return False + + +def urljoin(base_url, stub_url): + parsed = urlparse(base_url) + parsed = parsed._replace(path=parsed.path + '/' + stub_url) + return parsed.geturl() + + +class QueueMessagePolicy(SansIOHTTPPolicy): + + def on_request(self, request): + message_id = request.context.options.pop('queue_message_id', None) + if message_id: + request.http_request.url = urljoin( + request.http_request.url, + message_id) + + +class StorageHeadersPolicy(HeadersPolicy): + request_id_header_name = 'x-ms-client-request-id' + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + super(StorageHeadersPolicy, self).on_request(request) + current_time = format_date_time(time()) + request.http_request.headers['x-ms-date'] = current_time + + custom_id = request.context.options.pop('client_request_id', None) + request.http_request.headers['x-ms-client-request-id'] = custom_id or str(uuid.uuid1()) + + # def on_response(self, request, response): + # # raise exception if the echoed client request id from the service is not identical to the one we sent + # if self.request_id_header_name in response.http_response.headers: + + # client_request_id = request.http_request.headers.get(self.request_id_header_name) + + # if response.http_response.headers[self.request_id_header_name] != client_request_id: + # raise AzureError( + # "Echoed client request ID: {} does not match sent client request ID: {}. " + # "Service request ID: {}".format( + # response.http_response.headers[self.request_id_header_name], client_request_id, + # response.http_response.headers['x-ms-request-id']), + # response=response.http_response + # ) + + +class StorageHosts(SansIOHTTPPolicy): + + def __init__(self, hosts=None, **kwargs): # pylint: disable=unused-argument + self.hosts = hosts + super(StorageHosts, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + request.context.options['hosts'] = self.hosts + parsed_url = urlparse(request.http_request.url) + + # Detect what location mode we're currently requesting with + location_mode = LocationMode.PRIMARY + for key, value in self.hosts.items(): + if parsed_url.netloc == value: + location_mode = key + + # See if a specific location mode has been specified, and if so, redirect + use_location = request.context.options.pop('use_location', None) + if use_location: + # Lock retries to the specific location + request.context.options['retry_to_secondary'] = False + if use_location not in self.hosts: + raise ValueError(f"Attempting to use undefined host location {use_location}") + if use_location != location_mode: + # Update request URL to use the specified location + updated = parsed_url._replace(netloc=self.hosts[use_location]) + request.http_request.url = updated.geturl() + location_mode = use_location + + request.context.options['location_mode'] = location_mode + + +class StorageLoggingPolicy(NetworkTraceLoggingPolicy): + """A policy that logs HTTP request and response to the DEBUG logger. 
+ + This accepts both global configuration, and per-request level with "enable_http_logger" + """ + def __init__(self, logging_enable=False, **kwargs): + self.logging_body = kwargs.pop("logging_body", False) + super(StorageLoggingPolicy, self).__init__(logging_enable=logging_enable, **kwargs) + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + http_request = request.http_request + options = request.context.options + self.logging_body = self.logging_body or options.pop("logging_body", False) + if options.pop("logging_enable", self.enable_http_logger): + request.context["logging_enable"] = True + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + log_url = http_request.url + query_params = http_request.query + if 'sig' in query_params: + log_url = log_url.replace(query_params['sig'], "sig=*****") + _LOGGER.debug("Request URL: %r", log_url) + _LOGGER.debug("Request method: %r", http_request.method) + _LOGGER.debug("Request headers:") + for header, value in http_request.headers.items(): + if header.lower() == 'authorization': + value = '*****' + elif header.lower() == 'x-ms-copy-source' and 'sig' in value: + # take the url apart and scrub away the signed signature + scheme, netloc, path, params, query, fragment = urlparse(value) + parsed_qs = dict(parse_qsl(query)) + parsed_qs['sig'] = '*****' + + # the SAS needs to be put back together + value = urlunparse((scheme, netloc, path, params, urlencode(parsed_qs), fragment)) + + _LOGGER.debug(" %r: %r", header, value) + _LOGGER.debug("Request body:") + + if self.logging_body: + _LOGGER.debug(str(http_request.body)) + else: + # We don't want to log the binary data of a file upload. + _LOGGER.debug("Hidden body, please use logging_body to show body") + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log request: %r", err) + + def on_response(self, request, response): + # type: (PipelineRequest, PipelineResponse, Any) -> None + if response.context.pop("logging_enable", self.enable_http_logger): + if not _LOGGER.isEnabledFor(logging.DEBUG): + return + + try: + _LOGGER.debug("Response status: %r", response.http_response.status_code) + _LOGGER.debug("Response headers:") + for res_header, value in response.http_response.headers.items(): + _LOGGER.debug(" %r: %r", res_header, value) + + # We don't want to log binary data if the response is a file. 
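+ # Instead, classify the body from the content-disposition and content-type headers and log a short description of it.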
+ _LOGGER.debug("Response content:") + pattern = re.compile(r'attachment; ?filename=["\w.]+', re.IGNORECASE) + header = response.http_response.headers.get('content-disposition') + resp_content_type = response.http_response.headers.get("content-type", "") + + if header and pattern.match(header): + filename = header.partition('=')[2] + _LOGGER.debug("File attachments: %s", filename) + elif resp_content_type.endswith("octet-stream"): + _LOGGER.debug("Body contains binary data.") + elif resp_content_type.startswith("image"): + _LOGGER.debug("Body contains image data.") + + if self.logging_body and resp_content_type.startswith("text"): + _LOGGER.debug(response.http_response.text()) + elif self.logging_body: + try: + _LOGGER.debug(response.http_response.body()) + except ValueError: + _LOGGER.debug("Body is streamable") + + except Exception as err: # pylint: disable=broad-except + _LOGGER.debug("Failed to log response: %s", repr(err)) + + +class StorageRequestHook(SansIOHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._request_callback = kwargs.get('raw_request_hook') + super(StorageRequestHook, self).__init__() + + def on_request(self, request): + # type: (PipelineRequest, **Any) -> PipelineResponse + request_callback = request.context.options.pop('raw_request_hook', self._request_callback) + if request_callback: + request_callback(request) + + +class StorageResponseHook(HTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(StorageResponseHook, self).__init__() + + def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + # Values could be 0 + data_stream_total = request.context.get('data_stream_total') + if data_stream_total is None: + data_stream_total = request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') + if download_stream_current is None: + download_stream_current = request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') + if upload_stream_current is None: + upload_stream_current = request.context.options.pop('upload_stream_current', None) + + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = self.next.send(request) + + will_retry = is_retry(response, request.context.options.get('mode')) + # Auth error could come from Bearer challenge, in which case this request will be made again + is_auth_error = response.http_response.status_code == 401 + should_update_counts = not (will_retry or is_auth_error) + + if should_update_counts and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif should_update_counts and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = 
upload_stream_current + if response_callback: + response_callback(response) + request.context['response_callback'] = response_callback + return response + + +class StorageContentValidation(SansIOHTTPPolicy): + """A simple policy that sends the given headers + with the request. + + This will overwrite any headers already defined in the request. + """ + header_name = 'Content-MD5' + + def __init__(self, **kwargs): # pylint: disable=unused-argument + super(StorageContentValidation, self).__init__() + + @staticmethod + def get_content_md5(data): + # Since HTTP does not differentiate between no content and empty content, + # we have to perform a None check. + data = data or b"" + md5 = hashlib.md5() # nosec + if isinstance(data, bytes): + md5.update(data) + elif hasattr(data, 'read'): + pos = 0 + try: + pos = data.tell() + except: # pylint: disable=bare-except + pass + for chunk in iter(lambda: data.read(4096), b""): + md5.update(chunk) + try: + data.seek(pos, SEEK_SET) + except (AttributeError, IOError): + raise ValueError("Data should be bytes or a seekable file-like object.") + else: + raise ValueError("Data should be bytes or a seekable file-like object.") + + return md5.digest() + + def on_request(self, request): + # type: (PipelineRequest, Any) -> None + validate_content = request.context.options.pop('validate_content', False) + if validate_content and request.http_request.method != 'GET': + computed_md5 = encode_base64(StorageContentValidation.get_content_md5(request.http_request.data)) + request.http_request.headers[self.header_name] = computed_md5 + request.context['validate_content_md5'] = computed_md5 + request.context['validate_content'] = validate_content + + def on_response(self, request, response): + if response.context.get('validate_content', False) and response.http_response.headers.get('content-md5'): + computed_md5 = request.context.get('validate_content_md5') or \ + encode_base64(StorageContentValidation.get_content_md5(response.http_response.body())) + if response.http_response.headers['content-md5'] != computed_md5: + raise AzureError(( + f"MD5 mismatch. Expected value is '{response.http_response.headers['content-md5']}', " + f"computed value is '{computed_md5}'."), + response=response.http_response + ) + + +class StorageRetryPolicy(HTTPPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + def __init__(self, **kwargs): + self.total_retries = kwargs.pop('retry_total', 10) + self.connect_retries = kwargs.pop('retry_connect', 3) + self.read_retries = kwargs.pop('retry_read', 3) + self.status_retries = kwargs.pop('retry_status', 3) + self.retry_to_secondary = kwargs.pop('retry_to_secondary', False) + super(StorageRetryPolicy, self).__init__() + + def _set_next_host_location(self, settings, request): # pylint: disable=no-self-use + """ + A function which sets the next host location on the request, if applicable. + + :param ~azure.storage.models.RetryContext context: + The retry context containing the previous host location and the request + to evaluate and possibly modify. 
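+ For example, a request that failed against the primary host is retried against the secondary host, and vice versa.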
+ """ + if settings['hosts'] and all(settings['hosts'].values()): + url = urlparse(request.url) + # If there's more than one possible location, retry to the alternative + if settings['mode'] == LocationMode.PRIMARY: + settings['mode'] = LocationMode.SECONDARY + else: + settings['mode'] = LocationMode.PRIMARY + updated = url._replace(netloc=settings['hosts'].get(settings['mode'])) + request.url = updated.geturl() + + def configure_retries(self, request): # pylint: disable=no-self-use + body_position = None + if hasattr(request.http_request.body, 'read'): + try: + body_position = request.http_request.body.tell() + except (AttributeError, UnsupportedOperation): + # if body position cannot be obtained, then retries will not work + pass + options = request.context.options + return { + 'total': options.pop("retry_total", self.total_retries), + 'connect': options.pop("retry_connect", self.connect_retries), + 'read': options.pop("retry_read", self.read_retries), + 'status': options.pop("retry_status", self.status_retries), + 'retry_secondary': options.pop("retry_to_secondary", self.retry_to_secondary), + 'mode': options.pop("location_mode", LocationMode.PRIMARY), + 'hosts': options.pop("hosts", None), + 'hook': options.pop("retry_hook", None), + 'body_position': body_position, + 'count': 0, + 'history': [] + } + + def get_backoff_time(self, settings): # pylint: disable=unused-argument,no-self-use + """ Formula for computing the current backoff. + Should be calculated by child class. + + :rtype: float + """ + return 0 + + def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + transport.sleep(backoff) + + def increment(self, settings, request, response=None, error=None): + """Increment the retry counters. + + :param response: A pipeline response object. + :param error: An error encountered during the request, or + None if the response was received successfully. + + :return: Whether the retry attempts are exhausted. + """ + settings['total'] -= 1 + + if error and isinstance(error, ServiceRequestError): + # Errors when we're fairly sure that the server did not receive the + # request, so it should be safe to retry. + settings['connect'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + elif error and isinstance(error, ServiceResponseError): + # Errors that occur after the request has been started, so we should + # assume that the server began processing it. 
+ settings['read'] -= 1 + settings['history'].append(RequestHistory(request, error=error)) + + else: + # Incrementing because of a server error like a 500 in + # status_forcelist and a the given method is in the allowlist + if response: + settings['status'] -= 1 + settings['history'].append(RequestHistory(request, http_response=response)) + + if not is_exhausted(settings): + if request.method not in ['PUT'] and settings['retry_secondary']: + self._set_next_host_location(settings, request) + + # rewind the request body if it is a stream + if request.body and hasattr(request.body, 'read'): + # no position was saved, then retry would not work + if settings['body_position'] is None: + return False + try: + # attempt to rewind the body to the initial position + request.body.seek(settings['body_position'], SEEK_SET) + except (UnsupportedOperation, ValueError): + # if body is not seekable, then retry would not work + return False + settings['count'] += 1 + return True + return False + + def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(StorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. 
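+ By default (initial_backoff=15, increment_base=3) the backoff before jitter is 15 seconds for the first retry, 18 (15 + 3**1) for the second and 24 (15 + 3**2) for the third.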
+ + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + """ + random_generator = random.Random() + backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) + random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 + random_range_end = backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + +class LinearRetry(StorageRetryPolicy): + """Linear retry.""" + + def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): + """ + Constructs a Linear retry object. + + :param int backoff: + The backoff interval, in seconds, between retries. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + """ + self.backoff = backoff + self.random_jitter_range = random_jitter_range + super(LinearRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + """ + random_generator = random.Random() + # the backoff interval normally does not change, however there is the possibility + # that it was modified by accessing the property directly after initializing the object + random_range_start = self.backoff - self.random_jitter_range \ + if self.backoff > self.random_jitter_range else 0 + random_range_end = self.backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + +class StorageBearerTokenCredentialPolicy(BearerTokenCredentialPolicy): + """ Custom Bearer token credential policy for following Storage Bearer challenges """ + + def __init__(self, credential, **kwargs): + # type: (TokenCredential, **Any) -> None + super(StorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs) + + def on_challenge(self, request, response): + # type: (PipelineRequest, PipelineResponse) -> bool + try: + auth_header = response.http_response.headers.get("WWW-Authenticate") + challenge = StorageHttpChallenge(auth_header) + except ValueError: + return False + + scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE + self.authorize_request(request, scope, tenant_id=challenge.tenant_id) + + return True diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies_async.py new file mode 100644 index 00000000000..b0eae9f1c42 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/policies_async.py @@ -0,0 +1,253 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method + +import asyncio +import random +import logging +from typing import Any, TYPE_CHECKING + +from azure.core.pipeline.policies import AsyncBearerTokenCredentialPolicy, AsyncHTTPPolicy +from azure.core.exceptions import AzureError + +from .authentication import StorageHttpChallenge +from .constants import DEFAULT_OAUTH_SCOPE, STORAGE_OAUTH_SCOPE +from .policies import is_retry, StorageRetryPolicy + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + from azure.core.pipeline import PipelineRequest, PipelineResponse + + +_LOGGER = logging.getLogger(__name__) + + +async def retry_hook(settings, **kwargs): + if settings['hook']: + if asyncio.iscoroutine(settings['hook']): + await settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + else: + settings['hook']( + retry_count=settings['count'] - 1, + location_mode=settings['mode'], + **kwargs) + + +class AsyncStorageResponseHook(AsyncHTTPPolicy): + + def __init__(self, **kwargs): # pylint: disable=unused-argument + self._response_callback = kwargs.get('raw_response_hook') + super(AsyncStorageResponseHook, self).__init__() + + async def send(self, request): + # type: (PipelineRequest) -> PipelineResponse + # Values could be 0 + data_stream_total = request.context.get('data_stream_total') + if data_stream_total is None: + data_stream_total = request.context.options.pop('data_stream_total', None) + download_stream_current = request.context.get('download_stream_current') + if download_stream_current is None: + download_stream_current = request.context.options.pop('download_stream_current', None) + upload_stream_current = request.context.get('upload_stream_current') + if upload_stream_current is None: + upload_stream_current = request.context.options.pop('upload_stream_current', None) + + response_callback = request.context.get('response_callback') or \ + request.context.options.pop('raw_response_hook', self._response_callback) + + response = await self.next.send(request) + await response.http_response.load_body() + + will_retry = is_retry(response, request.context.options.get('mode')) + # Auth error could come from Bearer challenge, in which case this request will be made again + is_auth_error = response.http_response.status_code == 401 + should_update_counts = not (will_retry or is_auth_error) + + if should_update_counts and download_stream_current is not None: + download_stream_current += int(response.http_response.headers.get('Content-Length', 0)) + if data_stream_total is None: + content_range = response.http_response.headers.get('Content-Range') + if content_range: + data_stream_total = int(content_range.split(' ', 1)[1].split('/', 1)[1]) + else: + data_stream_total = download_stream_current + elif should_update_counts and upload_stream_current is not None: + upload_stream_current += int(response.http_request.headers.get('Content-Length', 0)) + for pipeline_obj in [request, response]: + pipeline_obj.context['data_stream_total'] = data_stream_total + pipeline_obj.context['download_stream_current'] = download_stream_current + pipeline_obj.context['upload_stream_current'] = upload_stream_current + if response_callback: + if asyncio.iscoroutine(response_callback): + await response_callback(response) + else: + response_callback(response) + 
request.context['response_callback'] = response_callback + return response + +class AsyncStorageRetryPolicy(StorageRetryPolicy): + """ + The base class for Exponential and Linear retries containing shared code. + """ + + async def sleep(self, settings, transport): + backoff = self.get_backoff_time(settings) + if not backoff or backoff < 0: + return + await transport.sleep(backoff) + + async def send(self, request): + retries_remaining = True + response = None + retry_settings = self.configure_retries(request) + while retries_remaining: + try: + response = await self.next.send(request) + if is_retry(response, retry_settings['mode']): + retries_remaining = self.increment( + retry_settings, + request=request.http_request, + response=response.http_response) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=response.http_response, + error=None) + await self.sleep(retry_settings, request.context.transport) + continue + break + except AzureError as err: + retries_remaining = self.increment( + retry_settings, request=request.http_request, error=err) + if retries_remaining: + await retry_hook( + retry_settings, + request=request.http_request, + response=None, + error=err) + await self.sleep(retry_settings, request.context.transport) + continue + raise err + if retry_settings['history']: + response.context['history'] = retry_settings['history'] + response.http_response.location_mode = retry_settings['mode'] + return response + + +class ExponentialRetry(AsyncStorageRetryPolicy): + """Exponential retry.""" + + def __init__(self, initial_backoff=15, increment_base=3, retry_total=3, + retry_to_secondary=False, random_jitter_range=3, **kwargs): + ''' + Constructs an Exponential retry object. The initial_backoff is used for + the first retry. Subsequent retries are retried after initial_backoff + + increment_power^retry_count seconds. For example, by default the first retry + occurs after 15 seconds, the second after (15+3^1) = 18 seconds, and the + third after (15+3^2) = 24 seconds. + + :param int initial_backoff: + The initial backoff interval, in seconds, for the first retry. + :param int increment_base: + The base, in seconds, to increment the initial_backoff by after the + first retry. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + ''' + self.initial_backoff = initial_backoff + self.increment_base = increment_base + self.random_jitter_range = random_jitter_range + super(ExponentialRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. 
+ :rtype: int or None + """ + random_generator = random.Random() + backoff = self.initial_backoff + (0 if settings['count'] == 0 else pow(self.increment_base, settings['count'])) + random_range_start = backoff - self.random_jitter_range if backoff > self.random_jitter_range else 0 + random_range_end = backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + +class LinearRetry(AsyncStorageRetryPolicy): + """Linear retry.""" + + def __init__(self, backoff=15, retry_total=3, retry_to_secondary=False, random_jitter_range=3, **kwargs): + """ + Constructs a Linear retry object. + + :param int backoff: + The backoff interval, in seconds, between retries. + :param int max_attempts: + The maximum number of retry attempts. + :param bool retry_to_secondary: + Whether the request should be retried to secondary, if able. This should + only be enabled of RA-GRS accounts are used and potentially stale data + can be handled. + :param int random_jitter_range: + A number in seconds which indicates a range to jitter/randomize for the back-off interval. + For example, a random_jitter_range of 3 results in the back-off interval x to vary between x+3 and x-3. + """ + self.backoff = backoff + self.random_jitter_range = random_jitter_range + super(LinearRetry, self).__init__( + retry_total=retry_total, retry_to_secondary=retry_to_secondary, **kwargs) + + def get_backoff_time(self, settings): + """ + Calculates how long to sleep before retrying. + + :return: + An integer indicating how long to wait before retrying the request, + or None to indicate no retry should be performed. + :rtype: int or None + """ + random_generator = random.Random() + # the backoff interval normally does not change, however there is the possibility + # that it was modified by accessing the property directly after initializing the object + random_range_start = self.backoff - self.random_jitter_range \ + if self.backoff > self.random_jitter_range else 0 + random_range_end = self.backoff + self.random_jitter_range + return random_generator.uniform(random_range_start, random_range_end) + + +class AsyncStorageBearerTokenCredentialPolicy(AsyncBearerTokenCredentialPolicy): + """ Custom Bearer token credential policy for following Storage Bearer challenges """ + + def __init__(self, credential, **kwargs): + # type: (AsyncTokenCredential, **Any) -> None + super(AsyncStorageBearerTokenCredentialPolicy, self).__init__(credential, STORAGE_OAUTH_SCOPE, **kwargs) + + async def on_challenge(self, request, response): + # type: (PipelineRequest, PipelineResponse) -> bool + try: + auth_header = response.http_response.headers.get("WWW-Authenticate") + challenge = StorageHttpChallenge(auth_header) + except ValueError: + return False + + scope = challenge.resource_id + DEFAULT_OAUTH_SCOPE + await self.authorize_request(request, scope, tenant_id=challenge.tenant_id) + + return True diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/request_handlers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/request_handlers.py new file mode 100644 index 00000000000..923b7890fda --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/request_handlers.py @@ -0,0 +1,278 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) + +import logging +from os import fstat +import stat +from io import (SEEK_END, SEEK_SET, UnsupportedOperation) + +import isodate + +from azure.core.exceptions import raise_with_traceback + + +_LOGGER = logging.getLogger(__name__) + +_REQUEST_DELIMITER_PREFIX = "batch_" +_HTTP1_1_IDENTIFIER = "HTTP/1.1" +_HTTP_LINE_ENDING = "\r\n" + + +def serialize_iso(attr): + """Serialize Datetime object into ISO-8601 formatted string. + + :param Datetime attr: Object to be serialized. + :rtype: str + :raises: ValueError if format invalid. + """ + if not attr: + return None + if isinstance(attr, str): + attr = isodate.parse_datetime(attr) + try: + utc = attr.utctimetuple() + if utc.tm_year > 9999 or utc.tm_year < 1: + raise OverflowError("Hit max or min date") + + date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( + utc.tm_year, utc.tm_mon, utc.tm_mday, + utc.tm_hour, utc.tm_min, utc.tm_sec) + return date + 'Z' + except (ValueError, OverflowError) as err: + msg = "Unable to serialize datetime object." + raise_with_traceback(ValueError, msg, err) + except AttributeError as err: + msg = "ISO-8601 object must be valid Datetime object." + raise_with_traceback(TypeError, msg, err) + + +def get_length(data): + length = None + # Check if object implements the __len__ method, covers most input cases such as bytearray. + try: + length = len(data) + except: # pylint: disable=bare-except + pass + + if not length: + # Check if the stream is a file-like stream object. + # If so, calculate the size using the file descriptor. + try: + fileno = data.fileno() + except (AttributeError, UnsupportedOperation): + pass + else: + try: + mode = fstat(fileno).st_mode + if stat.S_ISREG(mode) or stat.S_ISLNK(mode): + #st_size only meaningful if regular file or symlink, other types + # e.g. sockets may return misleading sizes like 0 + return fstat(fileno).st_size + except OSError: + # Not a valid fileno, may be possible requests returned + # a socket number? + pass + + # If the stream is seekable and tell() is implemented, calculate the stream size. 
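+ # Note: this seeks to the end of the stream to measure the bytes remaining from the current position, then restores the original position.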
+ try: + current_position = data.tell() + data.seek(0, SEEK_END) + length = data.tell() - current_position + data.seek(current_position, SEEK_SET) + except (AttributeError, OSError, UnsupportedOperation): + pass + + return length + + +def read_length(data): + try: + if hasattr(data, 'read'): + read_data = b'' + for chunk in iter(lambda: data.read(4096), b""): + read_data += chunk + return len(read_data), read_data + if hasattr(data, '__iter__'): + read_data = b'' + for chunk in data: + read_data += chunk + return len(read_data), read_data + except: # pylint: disable=bare-except + pass + raise ValueError("Unable to calculate content length, please specify.") + + +def validate_and_format_range_headers( + start_range, end_range, start_range_required=True, + end_range_required=True, check_content_md5=False, align_to_page=False): + # If end range is provided, start range must be provided + if (start_range_required or end_range is not None) and start_range is None: + raise ValueError("start_range value cannot be None.") + if end_range_required and end_range is None: + raise ValueError("end_range value cannot be None.") + + # Page ranges must be 512 aligned + if align_to_page: + if start_range is not None and start_range % 512 != 0: + raise ValueError(f"Invalid page blob start_range: {start_range}. " + "The size must be aligned to a 512-byte boundary.") + if end_range is not None and end_range % 512 != 511: + raise ValueError(f"Invalid page blob end_range: {end_range}. " + "The size must be aligned to a 512-byte boundary.") + + # Format based on whether end_range is present + range_header = None + if end_range is not None: + range_header = f'bytes={start_range}-{end_range}' + elif start_range is not None: + range_header = f"bytes={start_range}-" + + # Content MD5 can only be provided for a complete range less than 4MB in size + range_validation = None + if check_content_md5: + if start_range is None or end_range is None: + raise ValueError("Both start and end range required for MD5 content validation.") + if end_range - start_range > 4 * 1024 * 1024: + raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.") + range_validation = 'true' + + return range_header, range_validation + + +def add_metadata_headers(metadata=None): + # type: (Optional[Dict[str, str]]) -> Dict[str, str] + headers = {} + if metadata: + for key, value in metadata.items(): + headers[f'x-ms-meta-{key.strip()}'] = value.strip() if value else value + return headers + + +def serialize_batch_body(requests, batch_id): + """ + -- + + -- + (repeated as needed) + ---- + + Serializes the requests in this batch to a single HTTP mixed/multipart body. + + :param list[~azure.core.pipeline.transport.HttpRequest] requests: + a list of sub-request for the batch request + :param str batch_id: + to be embedded in batch sub-request delimiter + :return: The body bytes for this batch. 
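+ For a hypothetical batch_id "abc", each serialized sub-request is preceded by a "--batch_abc" line and the body is terminated by "--batch_abc--" followed by a trailing CRLF.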
+ """ + + if requests is None or len(requests) == 0: + raise ValueError('Please provide sub-request(s) for this batch request') + + delimiter_bytes = (_get_batch_request_delimiter(batch_id, True, False) + _HTTP_LINE_ENDING).encode('utf-8') + newline_bytes = _HTTP_LINE_ENDING.encode('utf-8') + batch_body = list() + + content_index = 0 + for request in requests: + request.headers.update({ + "Content-ID": str(content_index), + "Content-Length": str(0) + }) + batch_body.append(delimiter_bytes) + batch_body.append(_make_body_from_sub_request(request)) + batch_body.append(newline_bytes) + content_index += 1 + + batch_body.append(_get_batch_request_delimiter(batch_id, True, True).encode('utf-8')) + # final line of body MUST have \r\n at the end, or it will not be properly read by the service + batch_body.append(newline_bytes) + + return bytes().join(batch_body) + + +def _get_batch_request_delimiter(batch_id, is_prepend_dashes=False, is_append_dashes=False): + """ + Gets the delimiter used for this batch request's mixed/multipart HTTP format. + + :param str batch_id: + Randomly generated id + :param bool is_prepend_dashes: + Whether to include the starting dashes. Used in the body, but non on defining the delimiter. + :param bool is_append_dashes: + Whether to include the ending dashes. Used in the body on the closing delimiter only. + :return: The delimiter, WITHOUT a trailing newline. + """ + + prepend_dashes = '--' if is_prepend_dashes else '' + append_dashes = '--' if is_append_dashes else '' + + return prepend_dashes + _REQUEST_DELIMITER_PREFIX + batch_id + append_dashes + + +def _make_body_from_sub_request(sub_request): + """ + Content-Type: application/http + Content-ID: + Content-Transfer-Encoding: (if present) + + HTTP/ +
<header key>: <header value>
(repeated as necessary) + Content-Length: + (newline if content length > 0) + (if content length > 0) + + Serializes an http request. + + :param ~azure.core.pipeline.transport.HttpRequest sub_request: + Request to serialize. + :return: The serialized sub-request in bytes + """ + + # put the sub-request's headers into a list for efficient str concatenation + sub_request_body = list() + + # get headers for ease of manipulation; remove headers as they are used + headers = sub_request.headers + + # append opening headers + sub_request_body.append("Content-Type: application/http") + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-ID: ") + sub_request_body.append(headers.pop("Content-ID", "")) + sub_request_body.append(_HTTP_LINE_ENDING) + + sub_request_body.append("Content-Transfer-Encoding: binary") + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + # append HTTP verb and path and query and HTTP version + sub_request_body.append(sub_request.method) + sub_request_body.append(' ') + sub_request_body.append(sub_request.url) + sub_request_body.append(' ') + sub_request_body.append(_HTTP1_1_IDENTIFIER) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append remaining headers (this will set the Content-Length, as it was set on `sub-request`) + for header_name, header_value in headers.items(): + if header_value is not None: + sub_request_body.append(header_name) + sub_request_body.append(": ") + sub_request_body.append(header_value) + sub_request_body.append(_HTTP_LINE_ENDING) + + # append blank line + sub_request_body.append(_HTTP_LINE_ENDING) + + return ''.join(sub_request_body).encode() diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/response_handlers.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/response_handlers.py new file mode 100644 index 00000000000..aec26f43d2a --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/response_handlers.py @@ -0,0 +1,203 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, Iterable, Dict, List, Type, Tuple, + TYPE_CHECKING +) +import logging +from xml.etree.ElementTree import Element + +from azure.core.pipeline.policies import ContentDecodePolicy +from azure.core.exceptions import ( + HttpResponseError, + ResourceNotFoundError, + ResourceModifiedError, + ResourceExistsError, + ClientAuthenticationError, + DecodeError) + +from .parser import _to_utc_datetime +from .models import StorageErrorCode, UserDelegationKey, get_enum_value + + +if TYPE_CHECKING: + from datetime import datetime + from azure.core.exceptions import AzureError + + +_LOGGER = logging.getLogger(__name__) + + +class PartialBatchErrorException(HttpResponseError): + """There is a partial failure in batch operations. + + :param str message: The message of the exception. + :param response: Server response to be deserialized. + :param list parts: A list of the parts in multipart response. 
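+
+ The sub-responses remain available afterwards through the exception's parts attribute.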
+ """ + + def __init__(self, message, response, parts): + self.parts = parts + super(PartialBatchErrorException, self).__init__(message=message, response=response) + + +def parse_length_from_content_range(content_range): + ''' + Parses the blob length from the content range header: bytes 1-3/65537 + ''' + if content_range is None: + return None + + # First, split in space and take the second half: '1-3/65537' + # Next, split on slash and take the second half: '65537' + # Finally, convert to an int: 65537 + return int(content_range.split(' ', 1)[1].split('/', 1)[1]) + + +def normalize_headers(headers): + normalized = {} + for key, value in headers.items(): + if key.startswith('x-ms-'): + key = key[5:] + normalized[key.lower().replace('-', '_')] = get_enum_value(value) + return normalized + + +def deserialize_metadata(response, obj, headers): # pylint: disable=unused-argument + try: + raw_metadata = {k: v for k, v in response.http_response.headers.items() if k.startswith("x-ms-meta-")} + except AttributeError: + raw_metadata = {k: v for k, v in response.headers.items() if k.startswith("x-ms-meta-")} + return {k[10:]: v for k, v in raw_metadata.items()} + + +def return_response_headers(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers) + + +def return_headers_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return normalize_headers(response_headers), deserialized + + +def return_context_and_deserialized(response, deserialized, response_headers): # pylint: disable=unused-argument + return response.http_response.location_mode, deserialized + + +def return_raw_deserialized(response, *_): + return response.http_response.location_mode, response.context[ContentDecodePolicy.CONTEXT_NAME] + + +def process_storage_error(storage_error): # pylint:disable=too-many-statements + raise_error = HttpResponseError + serialized = False + if not storage_error.response: + raise storage_error + # If it is one of those three then it has been serialized prior by the generated layer. 
+ if isinstance(storage_error, (PartialBatchErrorException, + ClientAuthenticationError, ResourceNotFoundError, ResourceExistsError)): + serialized = True + error_code = storage_error.response.headers.get('x-ms-error-code') + error_message = storage_error.message + additional_data = {} + error_dict = {} + try: + error_body = ContentDecodePolicy.deserialize_from_http_generics(storage_error.response) + try: + error_body = error_body or storage_error.response.reason + except AttributeError: + error_body = '' + # If it is an XML response + if isinstance(error_body, Element): + error_dict = { + child.tag.lower(): child.text + for child in error_body + } + # If it is a JSON response + elif isinstance(error_body, dict): + error_dict = error_body.get('error', {}) + elif not error_code: + _LOGGER.warning( + 'Unexpected return type %s from ContentDecodePolicy.deserialize_from_http_generics.', type(error_body)) + error_dict = {'message': str(error_body)} + + # If we extracted from a Json or XML response + if error_dict: + error_code = error_dict.get('code') + error_message = error_dict.get('message') + additional_data = {k: v for k, v in error_dict.items() if k not in {'code', 'message'}} + except DecodeError: + pass + + try: + # This check would be unnecessary if we have already serialized the error + if error_code and not serialized: + error_code = StorageErrorCode(error_code) + if error_code in [StorageErrorCode.condition_not_met, + StorageErrorCode.blob_overwritten]: + raise_error = ResourceModifiedError + if error_code in [StorageErrorCode.invalid_authentication_info, + StorageErrorCode.authentication_failed]: + raise_error = ClientAuthenticationError + if error_code in [StorageErrorCode.resource_not_found, + StorageErrorCode.cannot_verify_copy_source, + StorageErrorCode.blob_not_found, + StorageErrorCode.queue_not_found, + StorageErrorCode.container_not_found, + StorageErrorCode.parent_not_found, + StorageErrorCode.share_not_found]: + raise_error = ResourceNotFoundError + if error_code in [StorageErrorCode.account_already_exists, + StorageErrorCode.account_being_created, + StorageErrorCode.resource_already_exists, + StorageErrorCode.resource_type_mismatch, + StorageErrorCode.blob_already_exists, + StorageErrorCode.queue_already_exists, + StorageErrorCode.container_already_exists, + StorageErrorCode.container_being_deleted, + StorageErrorCode.queue_being_deleted, + StorageErrorCode.share_already_exists, + StorageErrorCode.share_being_deleted]: + raise_error = ResourceExistsError + except ValueError: + # Got an unknown error code + pass + + # Error message should include all the error properties + try: + error_message += f"\nErrorCode:{error_code.value}" + except AttributeError: + error_message += f"\nErrorCode:{error_code}" + for name, info in additional_data.items(): + error_message += f"\n{name}:{info}" + + # No need to create an instance if it has already been serialized by the generated layer + if serialized: + storage_error.message = error_message + error = storage_error + else: + error = raise_error(message=error_message, response=storage_error.response) + # Ensure these properties are stored in the error instance as well (not just the error message) + error.error_code = error_code + error.additional_info = additional_data + # error.args is what's surfaced on the traceback - show error message in all cases + error.args = (error.message,) + try: + # `from None` prevents us from double printing the exception (suppresses generated layer error context) + exec("raise error from None") # pylint: 
disable=exec-used # nosec + except SyntaxError: + raise error + + +def parse_to_internal_user_delegation_key(service_user_delegation_key): + internal_user_delegation_key = UserDelegationKey() + internal_user_delegation_key.signed_oid = service_user_delegation_key.signed_oid + internal_user_delegation_key.signed_tid = service_user_delegation_key.signed_tid + internal_user_delegation_key.signed_start = _to_utc_datetime(service_user_delegation_key.signed_start) + internal_user_delegation_key.signed_expiry = _to_utc_datetime(service_user_delegation_key.signed_expiry) + internal_user_delegation_key.signed_service = service_user_delegation_key.signed_service + internal_user_delegation_key.signed_version = service_user_delegation_key.signed_version + internal_user_delegation_key.value = service_user_delegation_key.value + return internal_user_delegation_key diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/shared_access_signature.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/shared_access_signature.py new file mode 100644 index 00000000000..51e39c57bdc --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/shared_access_signature.py @@ -0,0 +1,223 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from datetime import date + +from .parser import _str, _to_utc_datetime +from .constants import X_MS_VERSION +from . import sign_string, url_quote + +# cspell:ignoreRegExp rsc. 
+# cspell:ignoreRegExp s..?id
+class QueryStringConstants(object):
+    SIGNED_SIGNATURE = 'sig'
+    SIGNED_PERMISSION = 'sp'
+    SIGNED_START = 'st'
+    SIGNED_EXPIRY = 'se'
+    SIGNED_RESOURCE = 'sr'
+    SIGNED_IDENTIFIER = 'si'
+    SIGNED_IP = 'sip'
+    SIGNED_PROTOCOL = 'spr'
+    SIGNED_VERSION = 'sv'
+    SIGNED_CACHE_CONTROL = 'rscc'
+    SIGNED_CONTENT_DISPOSITION = 'rscd'
+    SIGNED_CONTENT_ENCODING = 'rsce'
+    SIGNED_CONTENT_LANGUAGE = 'rscl'
+    SIGNED_CONTENT_TYPE = 'rsct'
+    START_PK = 'spk'
+    START_RK = 'srk'
+    END_PK = 'epk'
+    END_RK = 'erk'
+    SIGNED_RESOURCE_TYPES = 'srt'
+    SIGNED_SERVICES = 'ss'
+    SIGNED_OID = 'skoid'
+    SIGNED_TID = 'sktid'
+    SIGNED_KEY_START = 'skt'
+    SIGNED_KEY_EXPIRY = 'ske'
+    SIGNED_KEY_SERVICE = 'sks'
+    SIGNED_KEY_VERSION = 'skv'
+
+    # for ADLS
+    SIGNED_AUTHORIZED_OID = 'saoid'
+    SIGNED_UNAUTHORIZED_OID = 'suoid'
+    SIGNED_CORRELATION_ID = 'scid'
+    SIGNED_DIRECTORY_DEPTH = 'sdd'
+
+    @staticmethod
+    def to_list():
+        return [
+            QueryStringConstants.SIGNED_SIGNATURE,
+            QueryStringConstants.SIGNED_PERMISSION,
+            QueryStringConstants.SIGNED_START,
+            QueryStringConstants.SIGNED_EXPIRY,
+            QueryStringConstants.SIGNED_RESOURCE,
+            QueryStringConstants.SIGNED_IDENTIFIER,
+            QueryStringConstants.SIGNED_IP,
+            QueryStringConstants.SIGNED_PROTOCOL,
+            QueryStringConstants.SIGNED_VERSION,
+            QueryStringConstants.SIGNED_CACHE_CONTROL,
+            QueryStringConstants.SIGNED_CONTENT_DISPOSITION,
+            QueryStringConstants.SIGNED_CONTENT_ENCODING,
+            QueryStringConstants.SIGNED_CONTENT_LANGUAGE,
+            QueryStringConstants.SIGNED_CONTENT_TYPE,
+            QueryStringConstants.START_PK,
+            QueryStringConstants.START_RK,
+            QueryStringConstants.END_PK,
+            QueryStringConstants.END_RK,
+            QueryStringConstants.SIGNED_RESOURCE_TYPES,
+            QueryStringConstants.SIGNED_SERVICES,
+            QueryStringConstants.SIGNED_OID,
+            QueryStringConstants.SIGNED_TID,
+            QueryStringConstants.SIGNED_KEY_START,
+            QueryStringConstants.SIGNED_KEY_EXPIRY,
+            QueryStringConstants.SIGNED_KEY_SERVICE,
+            QueryStringConstants.SIGNED_KEY_VERSION,
+            # for ADLS
+            QueryStringConstants.SIGNED_AUTHORIZED_OID,
+            QueryStringConstants.SIGNED_UNAUTHORIZED_OID,
+            QueryStringConstants.SIGNED_CORRELATION_ID,
+            QueryStringConstants.SIGNED_DIRECTORY_DEPTH,
+        ]
+
+
+class SharedAccessSignature(object):
+    '''
+    Provides a factory for creating account access
+    signature tokens with an account name and account key. Users can either
+    use the factory or can construct the appropriate service and use the
+    generate_*_shared_access_signature method directly.
+    '''
+
+    def __init__(self, account_name, account_key, x_ms_version=X_MS_VERSION):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        :param str x_ms_version:
+            The service version used to generate the shared access signatures.
+        '''
+        self.account_name = account_name
+        self.account_key = account_key
+        self.x_ms_version = x_ms_version
+
+    def generate_account(self, services, resource_types, permission, expiry, start=None,
+                         ip=None, protocol=None):
+        '''
+        Generates a shared access signature for the account.
+        Use the returned signature with the sas_token parameter of the service
+        or to create a new account object.
+
+        :param ResourceTypes resource_types:
+            Specifies the resource types that are accessible with the account
+            SAS. You can combine values to provide access to more than one
+            resource type.
+        :param AccountSasPermissions permission:
+            The permissions associated with the shared access signature. 
The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. You can combine + values to provide more than one permission. + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + ''' + sas = _SharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_account(services, resource_types) + sas.add_account_signature(self.account_name, self.account_key) + + return sas.get_token() + + +class _SharedAccessHelper(object): + def __init__(self): + self.query_dict = {} + + def _add_query(self, name, val): + if val: + self.query_dict[name] = _str(val) if val is not None else None + + def add_base(self, permission, expiry, start, ip, protocol, x_ms_version): + if isinstance(start, date): + start = _to_utc_datetime(start) + + if isinstance(expiry, date): + expiry = _to_utc_datetime(expiry) + + self._add_query(QueryStringConstants.SIGNED_START, start) + self._add_query(QueryStringConstants.SIGNED_EXPIRY, expiry) + self._add_query(QueryStringConstants.SIGNED_PERMISSION, permission) + self._add_query(QueryStringConstants.SIGNED_IP, ip) + self._add_query(QueryStringConstants.SIGNED_PROTOCOL, protocol) + self._add_query(QueryStringConstants.SIGNED_VERSION, x_ms_version) + + def add_resource(self, resource): + self._add_query(QueryStringConstants.SIGNED_RESOURCE, resource) + + def add_id(self, policy_id): + self._add_query(QueryStringConstants.SIGNED_IDENTIFIER, policy_id) + + def add_account(self, services, resource_types): + self._add_query(QueryStringConstants.SIGNED_SERVICES, services) + self._add_query(QueryStringConstants.SIGNED_RESOURCE_TYPES, resource_types) + + def add_override_response_headers(self, cache_control, + content_disposition, + content_encoding, + content_language, + content_type): + self._add_query(QueryStringConstants.SIGNED_CACHE_CONTROL, cache_control) + self._add_query(QueryStringConstants.SIGNED_CONTENT_DISPOSITION, content_disposition) + self._add_query(QueryStringConstants.SIGNED_CONTENT_ENCODING, content_encoding) + self._add_query(QueryStringConstants.SIGNED_CONTENT_LANGUAGE, 
content_language) + self._add_query(QueryStringConstants.SIGNED_CONTENT_TYPE, content_type) + + def add_account_signature(self, account_name, account_key): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + string_to_sign = \ + (account_name + '\n' + + get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_SERVICES) + + get_value_to_append(QueryStringConstants.SIGNED_RESOURCE_TYPES) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + '\n' # Signed Encryption Scope - always empty for fileshare + ) + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + def get_token(self): + return '&'.join([f'{n}={url_quote(v)}' for n, v in self.query_dict.items() if v is not None]) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads.py new file mode 100644 index 00000000000..7aca3a9e8f5 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads.py @@ -0,0 +1,606 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +from concurrent import futures +from io import BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation +from itertools import islice +from math import ceil +from threading import Lock + +from azure.core.tracing.common import with_current_context + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers + + +_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024 +_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object." 
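As context for the vendored module below: its `_parallel_uploads` helper implements a bounded sliding window of futures, so that at most `max_concurrency` chunk uploads are in flight and every completed future is replaced by the next pending chunk until the generator is exhausted. A minimal standalone sketch of the same pattern follows; `upload_one` and `bounded_uploads` are illustrative names, not part of the vendored SDK:

```python
import itertools
from concurrent import futures


def upload_one(chunk):
    # Stand-in worker; in the vendored code this role is played by
    # uploader.process_chunk (wrapped in with_current_context for tracing).
    return len(chunk)


def bounded_uploads(chunks, max_concurrency):
    """Keep at most max_concurrency uploads in flight, refilling as they finish."""
    results = []
    pending = iter(chunks)
    with futures.ThreadPoolExecutor(max_concurrency) as executor:
        # Prime the window with the first max_concurrency chunks.
        running = {executor.submit(upload_one, c)
                   for c in itertools.islice(pending, max_concurrency)}
        while running:
            done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
            results.extend(f.result() for f in done)
            # Refill the window: one new submission per completed future.
            for _ in done:
                try:
                    running.add(executor.submit(upload_one, next(pending)))
                except StopIteration:
                    break
    return results


print(bounded_uploads([b"aa", b"bbb", b"c"], max_concurrency=2))  # completion order varies
```

Note that `futures.wait` returns the not-done set, so reassigning `running` on each iteration is what keeps the window bounded.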
+ + +def _parallel_uploads(executor, uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(executor.submit(with_current_context(uploader), next_chunk)) + except StopIteration: + break + + # Wait for the remaining uploads to finish + done, _running = futures.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + validate_content=None, + progress_hook=None, + **kwargs): + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + validate_content=validate_content, + progress_hook=progress_hook, + **kwargs) + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_chunk_streams() + running_futures = [ + executor.submit(with_current_context(uploader.process_chunk), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()] + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + progress_hook=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + progress_hook=progress_hook, + **kwargs) + + if parallel: + with futures.ThreadPoolExecutor(max_concurrency) as executor: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + executor.submit(with_current_context(uploader.process_substream_block), u) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()] + if any(range_ids): + return sorted(range_ids) + return [] + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__( + self, service, + total_size, + chunk_size, + stream, + parallel, + encryptor=None, + padder=None, + progress_hook=None, + **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_lock = Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + self.progress_hook = progress_hook + + # Encryption + self.encryptor = 
encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + def get_chunk_streams(self): + index = 0 + while True: + data = b"" + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if not isinstance(temp, bytes): + raise TypeError("Blob data should be of type bytes.") + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. + if temp == b"" or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + def _update_progress(self, length): + if self.progress_lock is not None: + with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + self.progress_hook(self.progress_total, self.total_size) + + def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = self._upload_chunk(chunk_offset, chunk_data) + self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + def process_substream_block(self, block_data): + return self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + def _upload_substream_block_with_progress(self, index, block_stream): + range_id = self._upload_substream_block(index, block_stream) + self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop("modified_access_conditions", None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
+ index = f'{chunk_offset:032d}' + block_id = encode_base64(url_quote(encode_base64(index))) + self.service.stage_block( + block_id, + len(chunk_data), + chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return index, block_id + + def _upload_substream_block(self, index, block_stream): + try: + block_id = f'BlockId{"%05d" % (index/self.chunk_size)}' + self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + return not any(bytearray(chunk_data)) + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = f"bytes={chunk_offset}-{chunk_end}" + computed_md5 = None + self.response_headers = self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + self.current_length = int(self.response_headers["blob_append_offset"]) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + self.response_headers = self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + def _upload_substream_block(self, index, block_stream): + try: + 
self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + return f'bytes={chunk_offset}-{chunk_end}', response + + # TODO: Implement this method. + def _upload_substream_block(self, index, block_stream): + pass + + +class SubStream(IOBase): + + def __init__(self, wrapped_stream, stream_begin_index, length, lockObj): + # Python 2.7: file-like objects created with open() typically support seek(), but are not + # derivations of io.IOBase and thus do not implement seekable(). + # Python > 3.0: file-like objects created with open() are derived from io.IOBase. + try: + # only the main thread runs this, so there's no need grabbing the lock + wrapped_stream.seek(0, SEEK_CUR) + except: + raise ValueError("Wrapped stream must support seek().") + + self._lock = lockObj + self._wrapped_stream = wrapped_stream + self._position = 0 + self._stream_begin_index = stream_begin_index + self._length = length + self._buffer = BytesIO() + + # we must avoid buffering more than necessary, and also not use up too much memory + # so the max buffer size is capped at 4MB + self._max_buffer_size = ( + length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE + ) + self._current_buffer_start = 0 + self._current_buffer_size = 0 + super(SubStream, self).__init__() + + def __len__(self): + return self._length + + def close(self): + if self._buffer: + self._buffer.close() + self._wrapped_stream = None + IOBase.close(self) + + def fileno(self): + return self._wrapped_stream.fileno() + + def flush(self): + pass + + def read(self, size=None): + if self.closed: # pylint: disable=using-constant-test + raise ValueError("Stream is closed.") + + if size is None: + size = self._length - self._position + + # adjust if out of bounds + if size + self._position >= self._length: + size = self._length - self._position + + # return fast + if size == 0 or self._buffer.closed: + return b"" + + # attempt first read from the read buffer and update position + read_buffer = self._buffer.read(size) + bytes_read = len(read_buffer) + bytes_remaining = size - bytes_read + self._position += bytes_read + + # repopulate the read buffer from the underlying stream to fulfill the request + # ensure the seek and read operations are done atomically (only if a lock is provided) + if bytes_remaining > 0: + with self._buffer: + # either read in the max buffer size specified on the class + # or read in just enough data for the current block/sub stream + current_max_buffer_size = min(self._max_buffer_size, self._length - self._position) + + # lock is only defined if max_concurrency > 1 (parallel uploads) + if self._lock: + with self._lock: + # reposition the underlying stream to match the start of the data to read + absolute_position = self._stream_begin_index + self._position + self._wrapped_stream.seek(absolute_position, SEEK_SET) + # If we can't seek to the right location, our read will be corrupted so fail 
fast. + if self._wrapped_stream.tell() != absolute_position: + raise IOError("Stream failed to seek to the desired location.") + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + else: + absolute_position = self._stream_begin_index + self._position + # It's possible that there's connection problem during data transfer, + # so when we retry we don't want to read from current position of wrapped stream, + # instead we should seek to where we want to read from. + if self._wrapped_stream.tell() != absolute_position: + self._wrapped_stream.seek(absolute_position, SEEK_SET) + + buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size) + + if buffer_from_stream: + # update the buffer with new data from the wrapped stream + # we need to note down the start position and size of the buffer, in case seek is performed later + self._buffer = BytesIO(buffer_from_stream) + self._current_buffer_start = self._position + self._current_buffer_size = len(buffer_from_stream) + + # read the remaining bytes from the new buffer and update position + second_read_buffer = self._buffer.read(bytes_remaining) + read_buffer += second_read_buffer + self._position += len(second_read_buffer) + + return read_buffer + + def readable(self): + return True + + def readinto(self, b): + raise UnsupportedOperation + + def seek(self, offset, whence=0): + if whence is SEEK_SET: + start_index = 0 + elif whence is SEEK_CUR: + start_index = self._position + elif whence is SEEK_END: + start_index = self._length + offset = -offset + else: + raise ValueError("Invalid argument for the 'whence' parameter.") + + pos = start_index + offset + + if pos > self._length: + pos = self._length + elif pos < 0: + pos = 0 + + # check if buffer is still valid + # if not, drop buffer + if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size: + self._buffer.close() + self._buffer = BytesIO() + else: # if yes seek to correct position + delta = pos - self._current_buffer_start + self._buffer.seek(delta, SEEK_SET) + + self._position = pos + return pos + + def seekable(self): + return True + + def tell(self): + return self._position + + def write(self): + raise UnsupportedOperation + + def writelines(self): + raise UnsupportedOperation + + def writeable(self): + return False + + +class IterStreamer(object): + """ + File-like streaming iterator. + """ + + def __init__(self, generator, encoding="UTF-8"): + self.generator = generator + self.iterator = iter(generator) + self.leftover = b"" + self.encoding = encoding + + def __len__(self): + return self.generator.__len__() + + def __iter__(self): + return self.iterator + + def seekable(self): + return False + + def __next__(self): + return next(self.iterator) + + next = __next__ # Python 2 compatibility. 
+ + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is not seekable.") + + def read(self, size): + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = self.__next__() + if isinstance(chunk, str): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + except StopIteration: + pass + + if count > size: + self.leftover = data[size:] + + return data[:size] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads_async.py new file mode 100644 index 00000000000..dd436906146 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared/uploads_async.py @@ -0,0 +1,461 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=no-self-use + +import asyncio +import inspect +import threading +from asyncio import Lock +from io import UnsupportedOperation +from itertools import islice +from math import ceil +from typing import AsyncGenerator, Union + +from . import encode_base64, url_quote +from .request_handlers import get_length +from .response_handlers import return_response_headers +from .uploads import SubStream, IterStreamer # pylint: disable=unused-import + + +async def _async_parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = await pending.__anext__() + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopAsyncIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def _parallel_uploads(uploader, pending, running): + range_ids = [] + while True: + # Wait for some download to finish before adding a new one + done, running = await asyncio.wait(running, return_when=asyncio.FIRST_COMPLETED) + range_ids.extend([chunk.result() for chunk in done]) + try: + for _ in range(0, len(done)): + next_chunk = next(pending) + running.add(asyncio.ensure_future(uploader(next_chunk))) + except StopIteration: + break + + # Wait for the remaining uploads to finish + if running: + done, _running = await asyncio.wait(running) + range_ids.extend([chunk.result() for chunk in done]) + return range_ids + + +async def upload_data_chunks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + progress_hook=None, + **kwargs): + + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + progress_hook=progress_hook, 
+ **kwargs) + + if parallel: + upload_tasks = uploader.get_chunk_streams() + running_futures = [] + for _ in range(max_concurrency): + try: + chunk = await upload_tasks.__anext__() + running_futures.append(asyncio.ensure_future(uploader.process_chunk(chunk))) + except StopAsyncIteration: + break + + range_ids = await _async_parallel_uploads(uploader.process_chunk, upload_tasks, running_futures) + else: + range_ids = [] + async for chunk in uploader.get_chunk_streams(): + range_ids.append(await uploader.process_chunk(chunk)) + + if any(range_ids): + return [r[1] for r in sorted(range_ids, key=lambda r: r[0])] + return uploader.response_headers + + +async def upload_substream_blocks( + service=None, + uploader_class=None, + total_size=None, + chunk_size=None, + max_concurrency=None, + stream=None, + progress_hook=None, + **kwargs): + parallel = max_concurrency > 1 + if parallel and 'modified_access_conditions' in kwargs: + # Access conditions do not work with parallelism + kwargs['modified_access_conditions'] = None + uploader = uploader_class( + service=service, + total_size=total_size, + chunk_size=chunk_size, + stream=stream, + parallel=parallel, + progress_hook=progress_hook, + **kwargs) + + if parallel: + upload_tasks = uploader.get_substream_blocks() + running_futures = [ + asyncio.ensure_future(uploader.process_substream_block(u)) + for u in islice(upload_tasks, 0, max_concurrency) + ] + range_ids = await _parallel_uploads(uploader.process_substream_block, upload_tasks, running_futures) + else: + range_ids = [] + for block in uploader.get_substream_blocks(): + range_ids.append(await uploader.process_substream_block(block)) + if any(range_ids): + return sorted(range_ids) + return + + +class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes + + def __init__( + self, service, + total_size, + chunk_size, + stream, + parallel, + encryptor=None, + padder=None, + progress_hook=None, + **kwargs): + self.service = service + self.total_size = total_size + self.chunk_size = chunk_size + self.stream = stream + self.parallel = parallel + + # Stream management + self.stream_lock = threading.Lock() if parallel else None + + # Progress feedback + self.progress_total = 0 + self.progress_lock = Lock() if parallel else None + self.progress_hook = progress_hook + + # Encryption + self.encryptor = encryptor + self.padder = padder + self.response_headers = None + self.etag = None + self.last_modified = None + self.request_options = kwargs + + async def get_chunk_streams(self): + index = 0 + while True: + data = b'' + read_size = self.chunk_size + + # Buffer until we either reach the end of the stream or get a whole chunk. + while True: + if self.total_size: + read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data))) + temp = self.stream.read(read_size) + if inspect.isawaitable(temp): + temp = await temp + if not isinstance(temp, bytes): + raise TypeError('Blob data should be of type bytes.') + data += temp or b"" + + # We have read an empty string and so are at the end + # of the buffer or we have read a full chunk. 
+ if temp == b'' or len(data) == self.chunk_size: + break + + if len(data) == self.chunk_size: + if self.padder: + data = self.padder.update(data) + if self.encryptor: + data = self.encryptor.update(data) + yield index, data + else: + if self.padder: + data = self.padder.update(data) + self.padder.finalize() + if self.encryptor: + data = self.encryptor.update(data) + self.encryptor.finalize() + if data: + yield index, data + break + index += len(data) + + async def process_chunk(self, chunk_data): + chunk_bytes = chunk_data[1] + chunk_offset = chunk_data[0] + return await self._upload_chunk_with_progress(chunk_offset, chunk_bytes) + + async def _update_progress(self, length): + if self.progress_lock is not None: + async with self.progress_lock: + self.progress_total += length + else: + self.progress_total += length + + if self.progress_hook: + await self.progress_hook(self.progress_total, self.total_size) + + async def _upload_chunk(self, chunk_offset, chunk_data): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_chunk_with_progress(self, chunk_offset, chunk_data): + range_id = await self._upload_chunk(chunk_offset, chunk_data) + await self._update_progress(len(chunk_data)) + return range_id + + def get_substream_blocks(self): + assert self.chunk_size is not None + lock = self.stream_lock + blob_length = self.total_size + + if blob_length is None: + blob_length = get_length(self.stream) + if blob_length is None: + raise ValueError("Unable to determine content length of upload data.") + + blocks = int(ceil(blob_length / (self.chunk_size * 1.0))) + last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size + + for i in range(blocks): + index = i * self.chunk_size + length = last_block_size if i == blocks - 1 else self.chunk_size + yield index, SubStream(self.stream, index, length, lock) + + async def process_substream_block(self, block_data): + return await self._upload_substream_block_with_progress(block_data[0], block_data[1]) + + async def _upload_substream_block(self, index, block_stream): + raise NotImplementedError("Must be implemented by child class.") + + async def _upload_substream_block_with_progress(self, index, block_stream): + range_id = await self._upload_substream_block(index, block_stream) + await self._update_progress(len(block_stream)) + return range_id + + def set_response_properties(self, resp): + self.etag = resp.etag + self.last_modified = resp.last_modified + + +class BlockBlobChunkUploader(_ChunkUploader): + + def __init__(self, *args, **kwargs): + kwargs.pop('modified_access_conditions', None) + super(BlockBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + # TODO: This is incorrect, but works with recording. 
+ index = f'{chunk_offset:032d}' + block_id = encode_base64(url_quote(encode_base64(index))) + await self.service.stage_block( + block_id, + len(chunk_data), + body=chunk_data, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + return index, block_id + + async def _upload_substream_block(self, index, block_stream): + try: + block_id = f'BlockId{"%05d" % (index/self.chunk_size)}' + await self.service.stage_block( + block_id, + len(block_stream), + block_stream, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + finally: + block_stream.close() + return block_id + + +class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def _is_chunk_empty(self, chunk_data): + # read until non-zero byte is encountered + # if reached the end without returning, then chunk_data is all 0's + for each_byte in chunk_data: + if each_byte not in [0, b'\x00']: + return False + return True + + async def _upload_chunk(self, chunk_offset, chunk_data): + # avoid uploading the empty pages + if not self._is_chunk_empty(chunk_data): + chunk_end = chunk_offset + len(chunk_data) - 1 + content_range = f'bytes={chunk_offset}-{chunk_end}' + computed_md5 = None + self.response_headers = await self.service.upload_pages( + body=chunk_data, + content_length=len(chunk_data), + transactional_content_md5=computed_md5, + range=content_range, + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + async def _upload_substream_block(self, index, block_stream): + pass + + +class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + def __init__(self, *args, **kwargs): + super(AppendBlobChunkUploader, self).__init__(*args, **kwargs) + self.current_length = None + + async def _upload_chunk(self, chunk_offset, chunk_data): + if self.current_length is None: + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + self.current_length = int(self.response_headers['blob_append_offset']) + else: + self.request_options['append_position_access_conditions'].append_position = \ + self.current_length + chunk_offset + self.response_headers = await self.service.append_block( + body=chunk_data, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options) + + async def _upload_substream_block(self, index, block_stream): + pass + + +class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + self.response_headers = await self.service.append_data( + body=chunk_data, + position=chunk_offset, + content_length=len(chunk_data), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + + if not self.parallel and self.request_options.get('modified_access_conditions'): + self.request_options['modified_access_conditions'].if_match = self.response_headers['etag'] + + 
async def _upload_substream_block(self, index, block_stream): + try: + await self.service.append_data( + body=block_stream, + position=index, + content_length=len(block_stream), + cls=return_response_headers, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + finally: + block_stream.close() + + +class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method + + async def _upload_chunk(self, chunk_offset, chunk_data): + length = len(chunk_data) + chunk_end = chunk_offset + length - 1 + response = await self.service.upload_range( + chunk_data, + chunk_offset, + length, + data_stream_total=self.total_size, + upload_stream_current=self.progress_total, + **self.request_options + ) + range_id = f'bytes={chunk_offset}-{chunk_end}' + return range_id, response + + # TODO: Implement this method. + async def _upload_substream_block(self, index, block_stream): + pass + + +class AsyncIterStreamer(): + """ + File-like streaming object for AsyncGenerators. + """ + def __init__(self, generator: AsyncGenerator[Union[bytes, str], None], encoding: str = "UTF-8"): + self.iterator = generator.__aiter__() + self.leftover = b"" + self.encoding = encoding + + def seekable(self): + return False + + def tell(self, *args, **kwargs): + raise UnsupportedOperation("Data generator does not support tell.") + + def seek(self, *args, **kwargs): + raise UnsupportedOperation("Data generator is not seekable.") + + async def read(self, size: int) -> bytes: + data = self.leftover + count = len(self.leftover) + try: + while count < size: + chunk = await self.iterator.__anext__() + if isinstance(chunk, str): + chunk = chunk.encode(self.encoding) + data += chunk + count += len(chunk) + # This means count < size and what's leftover will be returned in this call. + except StopAsyncIteration: + self.leftover = b"" + + if count >= size: + self.leftover = data[size:] + + return data[:size] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared_access_signature.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared_access_signature.py new file mode 100644 index 00000000000..234d1f9d193 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_shared_access_signature.py @@ -0,0 +1,495 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, List, TYPE_CHECKING +) + +from ._shared import sign_string +from ._shared.constants import X_MS_VERSION +from ._shared.models import Services +from ._shared.shared_access_signature import SharedAccessSignature, _SharedAccessHelper, QueryStringConstants +from ._shared.parser import _str + +if TYPE_CHECKING: + from datetime import datetime + from .import ( + ResourceTypes, + AccountSasPermissions, + ShareSasPermissions, + FileSasPermissions + ) + +class FileSharedAccessSignature(SharedAccessSignature): + ''' + Provides a factory for creating file and share access + signature tokens with a common account name and account key. Users can either + use the factory or can construct the appropriate service and use the + generate_*_shared_access_signature method directly. 
+    '''
+
+    def __init__(self, account_name, account_key):
+        '''
+        :param str account_name:
+            The storage account name used to generate the shared access signatures.
+        :param str account_key:
+            The access key used to generate the shared access signatures.
+        '''
+        super(FileSharedAccessSignature, self).__init__(account_name, account_key, x_ms_version=X_MS_VERSION)
+
+    def generate_file(self, share_name, directory_name=None, file_name=None,
+                      permission=None, expiry=None, start=None, policy_id=None,
+                      ip=None, protocol=None, cache_control=None,
+                      content_disposition=None, content_encoding=None,
+                      content_language=None, content_type=None):
+        '''
+        Generates a shared access signature for the file.
+        Use the returned signature with the sas_token parameter of FileService.
+
+        :param str share_name:
+            Name of share.
+        :param str directory_name:
+            Name of directory. SAS tokens cannot be created for directories, so
+            this parameter should only be present if file_name is provided.
+        :param str file_name:
+            Name of file.
+        :param permission:
+            The permissions associated with the shared access signature. The
+            user is restricted to operations allowed by the permissions.
+            Permissions must be ordered rcwd.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has been
+            specified in an associated stored access policy.
+        :type permission: str or FileSasPermissions
+        :param expiry:
+            The time at which the shared access signature becomes invalid.
+            Required unless an id is given referencing a stored access policy
+            which contains this field. This field must be omitted if it has
+            been specified in an associated stored access policy. Azure will always
+            convert values to UTC. If a date is passed in without timezone info, it
+            is assumed to be UTC.
+        :type expiry: datetime or str
+        :param start:
+            The time at which the shared access signature becomes valid. If
+            omitted, start time for this call is assumed to be the time when the
+            storage service receives the request. Azure will always convert values
+            to UTC. If a date is passed in without timezone info, it is assumed to
+            be UTC.
+        :type start: datetime or str
+        :param str policy_id:
+            A unique value up to 64 characters in length that correlates to a
+            stored access policy. To create a stored access policy, use
+            set_file_service_properties.
+        :param str ip:
+            Specifies an IP address or a range of IP addresses from which to accept requests.
+            If the IP address from which the request originates does not match the IP address
+            or address range specified on the SAS token, the request is not authenticated.
+            For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
+            restricts the request to those IP addresses.
+        :param str protocol:
+            Specifies the protocol permitted for a request made. The default value
+            is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values.
+        :param str cache_control:
+            Response header value for Cache-Control when resource is accessed
+            using this shared access signature.
+        :param str content_disposition:
+            Response header value for Content-Disposition when resource is accessed
+            using this shared access signature.
+        :param str content_encoding:
+            Response header value for Content-Encoding when resource is accessed
+            using this shared access signature. 
+ :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + resource_path = share_name + if directory_name is not None: + resource_path += '/' + _str(directory_name) if directory_name is not None else None + resource_path += '/' + _str(file_name) if file_name is not None else None + + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('f') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, resource_path) + + return sas.get_token() + + def generate_share(self, share_name, permission=None, expiry=None, + start=None, policy_id=None, ip=None, protocol=None, + cache_control=None, content_disposition=None, + content_encoding=None, content_language=None, + content_type=None): + ''' + Generates a shared access signature for the share. + Use the returned signature with the sas_token parameter of FileService. + + :param str share_name: + Name of share. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered rcwdl. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + set_file_service_properties. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :param str protocol: + Specifies the protocol permitted for a request made. The default value + is https,http. See :class:`~azure.storage.common.models.Protocol` for possible values. + :param str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :param str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :param str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. 
+ :param str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :param str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + ''' + sas = _FileSharedAccessHelper() + sas.add_base(permission, expiry, start, ip, protocol, self.x_ms_version) + sas.add_id(policy_id) + sas.add_resource('s') + sas.add_override_response_headers(cache_control, content_disposition, + content_encoding, content_language, + content_type) + sas.add_resource_signature(self.account_name, self.account_key, share_name) + + return sas.get_token() + + +class _FileSharedAccessHelper(_SharedAccessHelper): + + def add_resource_signature(self, account_name, account_key, path): + def get_value_to_append(query): + return_value = self.query_dict.get(query) or '' + return return_value + '\n' + + if path[0] != '/': + path = '/' + path + + canonicalized_resource = '/file/' + account_name + path + '\n' + + # Form the string to sign from shared_access_policy and canonicalized + # resource. The order of values is important. + string_to_sign = \ + (get_value_to_append(QueryStringConstants.SIGNED_PERMISSION) + + get_value_to_append(QueryStringConstants.SIGNED_START) + + get_value_to_append(QueryStringConstants.SIGNED_EXPIRY) + + canonicalized_resource + + get_value_to_append(QueryStringConstants.SIGNED_IDENTIFIER) + + get_value_to_append(QueryStringConstants.SIGNED_IP) + + get_value_to_append(QueryStringConstants.SIGNED_PROTOCOL) + + get_value_to_append(QueryStringConstants.SIGNED_VERSION) + + get_value_to_append(QueryStringConstants.SIGNED_CACHE_CONTROL) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_DISPOSITION) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_ENCODING) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_LANGUAGE) + + get_value_to_append(QueryStringConstants.SIGNED_CONTENT_TYPE)) + + # remove the trailing newline + if string_to_sign[-1] == '\n': + string_to_sign = string_to_sign[:-1] + + self._add_query(QueryStringConstants.SIGNED_SIGNATURE, + sign_string(account_key, string_to_sign)) + + +def generate_account_sas( + account_name, # type: str + account_key, # type: str + resource_types, # type: Union[ResourceTypes, str] + permission, # type: Union[AccountSasPermissions, str] + expiry, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for the file service. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param ~azure.storage.fileshare.ResourceTypes resource_types: + Specifies the resource types that are accessible with the account SAS. + :param ~azure.storage.fileshare.AccountSasPermissions permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
+ :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_authentication.py + :start-after: [START generate_sas_token] + :end-before: [END generate_sas_token] + :language: python + :dedent: 8 + :caption: Generate a sas token. + """ + sas = SharedAccessSignature(account_name, account_key) + return sas.generate_account( + services=Services(fileshare=True), + resource_types=resource_types, + permission=permission, + expiry=expiry, + start=start, + ip=ip, + **kwargs + ) # type: ignore + + +def generate_share_sas( + account_name, # type: str + share_name, # type: str + account_key, # type: str + permission=None, # type: Optional[Union[ShareSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): # type: (...) -> str + """Generates a shared access signature for a share. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered rcwdl. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. + :type permission: str or ShareSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. 
+ :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. To create a stored access policy, use + :func:`~azure.storage.fileshare.ShareClient.set_share_access_policy`. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + return sas.generate_share( + share_name=share_name, + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) + + +def generate_file_sas( + account_name, # type: str + share_name, # type: str + file_path, # type: List[str] + account_key, # type: str + permission=None, # type: Optional[Union[FileSasPermissions, str]] + expiry=None, # type: Optional[Union[datetime, str]] + start=None, # type: Optional[Union[datetime, str]] + policy_id=None, # type: Optional[str] + ip=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> str + """Generates a shared access signature for a file. + + Use the returned signature with the credential parameter of any ShareServiceClient, + ShareClient, ShareDirectoryClient, or ShareFileClient. + + :param str account_name: + The storage account name used to generate the shared access signature. + :param str share_name: + The name of the share. + :param file_path: + The file path represented as a list of path segments, including the file name. + :type file_path: List[str] + :param str account_key: + The account key, also called shared key or access key, to generate the shared access signature. + :param permission: + The permissions associated with the shared access signature. The + user is restricted to operations allowed by the permissions. + Permissions must be ordered rcwd. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has been + specified in an associated stored access policy. 
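The token returned by `generate_share_sas` can be passed directly as the `credential` of a `ShareClient`. A hedged sketch under the same assumptions as above (placeholder names; sync clients assumed to be exported alongside the SAS helpers):

```python
# Minimal sketch of a share-scoped SAS used as a ShareClient credential;
# account/share names and the key are placeholders.
from datetime import datetime, timedelta, timezone

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import (
    ShareClient,
    ShareSasPermissions,
    generate_share_sas,
)

share_sas = generate_share_sas(
    account_name="mystorageaccount",  # placeholder
    share_name="myshare",             # placeholder
    account_key="<account-key>",      # placeholder
    permission=ShareSasPermissions(read=True, list=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)

share = ShareClient(
    account_url="https://mystorageaccount.file.core.windows.net",  # placeholder
    share_name="myshare",
    credential=share_sas,
)
```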
+ :type permission: str or FileSasPermissions + :param expiry: + The time at which the shared access signature becomes invalid. + Required unless an id is given referencing a stored access policy + which contains this field. This field must be omitted if it has + been specified in an associated stored access policy. Azure will always + convert values to UTC. If a date is passed in without timezone info, it + is assumed to be UTC. + :type expiry: ~datetime.datetime or str + :param start: + The time at which the shared access signature becomes valid. If + omitted, start time for this call is assumed to be the time when the + storage service receives the request. Azure will always convert values + to UTC. If a date is passed in without timezone info, it is assumed to + be UTC. + :type start: ~datetime.datetime or str + :param str policy_id: + A unique value up to 64 characters in length that correlates to a + stored access policy. + :param str ip: + Specifies an IP address or a range of IP addresses from which to accept requests. + If the IP address from which the request originates does not match the IP address + or address range specified on the SAS token, the request is not authenticated. + For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS + restricts the request to those IP addresses. + :keyword str cache_control: + Response header value for Cache-Control when resource is accessed + using this shared access signature. + :keyword str content_disposition: + Response header value for Content-Disposition when resource is accessed + using this shared access signature. + :keyword str content_encoding: + Response header value for Content-Encoding when resource is accessed + using this shared access signature. + :keyword str content_language: + Response header value for Content-Language when resource is accessed + using this shared access signature. + :keyword str content_type: + Response header value for Content-Type when resource is accessed + using this shared access signature. + :keyword str protocol: + Specifies the protocol permitted for a request made. The default value is https. + :return: A Shared Access Signature (sas) token. + :rtype: str + """ + sas = FileSharedAccessSignature(account_name, account_key) + if len(file_path) > 1: + dir_path = '/'.join(file_path[:-1]) + else: + dir_path = None # type: ignore + return sas.generate_file( # type: ignore + share_name=share_name, + directory_name=dir_path, + file_name=file_path[-1], + permission=permission, + expiry=expiry, + start=start, + policy_id=policy_id, + ip=ip, + **kwargs + ) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_version.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_version.py new file mode 100644 index 00000000000..00ae79f3b19 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/_version.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
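Note that `generate_file_sas` takes `file_path` as a list of path segments: as the body above shows, every segment but the last is joined into `directory_name` and the last becomes `file_name`. A hedged sketch with placeholder names:

```python
# Minimal sketch of generate_file_sas; all names are placeholders and the
# vendored package is assumed to mirror azure.storage.fileshare's exports.
from datetime import datetime, timedelta, timezone

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import (
    FileSasPermissions,
    generate_file_sas,
)

file_sas = generate_file_sas(
    account_name="mystorageaccount",           # placeholder
    share_name="myshare",                      # placeholder
    file_path=["nested", "dir", "image.vhd"],  # -> directory "nested/dir", file "image.vhd"
    account_key="<account-key>",               # placeholder
    permission=FileSasPermissions(read=True),
    expiry=datetime.now(timezone.utc) + timedelta(hours=1),
)
```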
+# -------------------------------------------------------------------------- + +VERSION = "12.12.0b1" diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/__init__.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/__init__.py new file mode 100644 index 00000000000..73393b819df --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/__init__.py @@ -0,0 +1,20 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +from ._file_client_async import ShareFileClient +from ._directory_client_async import ShareDirectoryClient +from ._share_client_async import ShareClient +from ._share_service_client_async import ShareServiceClient +from ._lease_async import ShareLeaseClient + + +__all__ = [ + 'ShareFileClient', + 'ShareDirectoryClient', + 'ShareClient', + 'ShareServiceClient', + 'ShareLeaseClient', +] diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_directory_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_directory_client_async.py new file mode 100644 index 00000000000..e3bc53403e4 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_directory_client_async.py @@ -0,0 +1,872 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import functools +import sys +import time +import warnings +from datetime import datetime +from typing import ( + Any, AnyStr, AsyncIterable, Dict, IO, Iterable, Optional, Union, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError, ResourceNotFoundError +from azure.core.pipeline import AsyncPipeline +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _get_file_permission, _datetime_to_str +from .._shared.parser import _str +from .._generated.aio import AzureFileStorage +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.policies_async import ExponentialRetry +from .._shared.request_handlers import add_metadata_headers +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_directory_properties +from .._serialize import get_api_version, get_dest_access_conditions, get_rename_smb_properties +from .._directory_client import ShareDirectoryClient as ShareDirectoryClientBase +from ._file_client_async import ShareFileClient +from ._models import DirectoryPropertiesPaged, HandlesPaged + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from .._models import DirectoryProperties, Handle, NTFSAttributes + + +class ShareDirectoryClient(AsyncStorageAccountHostsMixin, ShareDirectoryClientBase): + """A client to interact with a specific directory, although it may not yet exist. + + For operations relating to a specific subdirectory or file in this share, the clients for those + entities can also be retrieved using the :func:`get_subdirectory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the directory, + use the :func:`from_directory_url` classmethod. + :param share_name: + The name of the share for the directory. + :type share_name: str + :param str directory_path: + The directory path for the directory with which to interact. + If specified, this value will override a directory value specified in the directory URL. + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. 
+ :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( + self, account_url: str, + share_name: str, + directory_path: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + if loop and sys.version_info >= (3, 8): + warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level" + "APIs in Python 3.8 and is no longer supported.", DeprecationWarning) + super(ShareDirectoryClient, self).__init__( + account_url, + share_name=share_name, + directory_path=directory_path, + snapshot=snapshot, + credential=credential, + **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + def get_file_client(self, file_name, **kwargs): + # type: (str, Any) -> ShareFileClient + """Get a client to interact with a specific file. + + The file need not already exist. + + :param str file_name: + The name of the file. + :returns: A File Client. 
+ :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if self.directory_path: + file_name = self.directory_path.rstrip('/') + "/" + file_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareFileClient( + self.url, file_path=file_name, share_name=self.share_name, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent, + **kwargs) + + def get_subdirectory_client(self, directory_name, **kwargs): + # type: (str, Any) -> ShareDirectoryClient + """Get a client to interact with a specific subdirectory. + + The subdirectory need not already exist. + + :param str directory_name: + The name of the subdirectory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START get_subdirectory_client] + :end-before: [END get_subdirectory_client] + :language: python + :dedent: 16 + :caption: Gets the subdirectory client. + """ + directory_path = self.directory_path.rstrip('/') + "/" + directory_name + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent, + **kwargs) + + @distributed_trace_async + async def create_directory(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new directory under the directory referenced by the client. + + :keyword file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "none" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + Creation time for the directory. Default value: "now". + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + Last write time for the directory. Default value: "now". + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword str file_permission: + If specified the permission (security descriptor) shall be set + for the directory/file. This header can be used if Permission size is + <= 8KB, else file-permission-key header shall be used. + Default value: Inherit. If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file-permission or file-permission-key should be specified. + :keyword str file_permission_key: + Key of the permission to be set for the directory/file. 
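Both getters above reuse the parent client's wrapped transport, pipeline policies, and credential rather than opening new connections. A hedged sketch of composing clients this way (endpoint, share, and SAS are placeholders):

```python
# Minimal sketch: deriving file/subdirectory clients from an async
# ShareDirectoryClient; all connection details are placeholders.
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import (
    ShareDirectoryClient,
)

async def main() -> None:
    parent = ShareDirectoryClient(
        account_url="https://mystorageaccount.file.core.windows.net",  # placeholder
        share_name="myshare",                                          # placeholder
        directory_path="artifacts",
        credential="<sas-token>",                                      # placeholder
    )
    # Both getters share the parent's transport and credential.
    subdir = parent.get_subdirectory_client("images")
    file_client = subdir.get_file_client("image.vhd")
    print(file_client.url)
    await parent.close()

asyncio.run(main())
```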
+ Note: Only one of the file-permission or file-permission-key should be specified. + :keyword file_change_time: + Change time for the directory. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword dict(str,str) metadata: + Name-value pairs associated with the directory as metadata. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: Directory-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_directory] + :end-before: [END create_directory] + :language: python + :dedent: 16 + :caption: Creates a directory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + file_attributes = kwargs.pop('file_attributes', 'none') + file_creation_time = kwargs.pop('file_creation_time', 'now') + file_last_write_time = kwargs.pop('file_last_write_time', 'now') + file_change_time = kwargs.pop('file_change_time', None) + file_permission = kwargs.pop('file_permission', None) + file_permission_key = kwargs.pop('file_permission_key', None) + file_permission = _get_file_permission(file_permission, file_permission_key, 'inherit') + + try: + return await self._client.directory.create( # type: ignore + file_attributes=str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=file_permission_key, + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_directory(self, **kwargs): + # type: (**Any) -> None + """Marks the directory for deletion. The directory is + later deleted during garbage collection. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_directory] + :end-before: [END delete_directory] + :language: python + :dedent: 16 + :caption: Deletes a directory. + """ + timeout = kwargs.pop('timeout', None) + try: + await self._client.directory.delete(timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def rename_directory( + self, new_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> ShareDirectoryClient + """ + Rename the source directory. + + :param str new_name: + The new directory name. 
+ :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword bool overwrite: + A boolean value for if the destination file already exists, whether this request will + overwrite the file or not. If true, the rename will succeed and will overwrite the + destination file. If not provided or if false and the destination file does exist, the + request will not overwrite the destination file. If provided and the destination file + doesn't exist, the rename will succeed. + :keyword bool ignore_read_only: + A boolean value that specifies whether the ReadOnly attribute on a preexisting destination + file should be respected. If true, the rename will succeed, otherwise, a previous file at the + destination with the ReadOnly attribute set will cause the rename to fail. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory. This header + can be used if Permission size is <= 8KB, else file_permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + A value of 'preserve' can be passed to preserve source permissions. + Note: Only one of the file_permission or file_permission_key should be specified. + :keyword str file_permission_key: + Key of the permission to be set for the directory. + Note: Only one of the file-permission or file-permission-key should be specified. + :keyword file_attributes: + The file system attributes for the directory. + :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str + :keyword file_creation_time: + Creation time for the directory. + :paramtype file_creation_time:~datetime.datetime or str + :keyword file_last_write_time: + Last write time for the file. + :paramtype file_last_write_time:~datetime.datetime or str + :keyword file_change_time: + Change time for the directory. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword Dict[str,str] metadata: + A name-value pair to associate with a file storage object. + :keyword destination_lease: + Required if the destination file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :returns: The new Directory Client. 
+ :rtype: ~azure.storage.fileshare.ShareDirectoryClient + """ + if not new_name: + raise ValueError("Please specify a new directory name.") + + new_name = new_name.strip('/') + new_path_and_query = new_name.split('?') + new_dir_path = new_path_and_query[0] + if len(new_path_and_query) == 2: + new_dir_sas = new_path_and_query[1] or self._query_str.strip('?') + else: + new_dir_sas = self._query_str.strip('?') + + new_directory_client = ShareDirectoryClient( + '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_dir_path, + credential=new_dir_sas or self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent + ) + + kwargs.update(get_rename_smb_properties(kwargs)) + + timeout = kwargs.pop('timeout', None) + overwrite = kwargs.pop('overwrite', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + + destination_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) + + try: + await new_directory_client._client.directory.rename( # pylint: disable=protected-access + self.url, + timeout=timeout, + replace_if_exists=overwrite, + destination_lease_access_conditions=destination_access_conditions, + headers=headers, + **kwargs) + + return new_directory_client + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files(self, name_starts_with=None, **kwargs): + # type: (Optional[str], Any) -> AsyncItemPaged + """Lists all the directories and files under the directory. + + :param str name_starts_with: + Filters the results to return only entities whose names + begin with the specified prefix. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + :rtype: ~azure.core.async_paging.AsyncItemPaged[DirectoryProperties and FileProperties] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START lists_directory] + :end-before: [END lists_directory] + :language: python + :dedent: 16 + :caption: List directories and files. 
+ """
+ timeout = kwargs.pop('timeout', None)
+ results_per_page = kwargs.pop('results_per_page', None)
+ command = functools.partial(
+ self._client.directory.list_files_and_directories_segment,
+ sharesnapshot=self.snapshot,
+ timeout=timeout,
+ **kwargs)
+ return AsyncItemPaged(
+ command, prefix=name_starts_with, results_per_page=results_per_page,
+ page_iterator_class=DirectoryPropertiesPaged)
+
+ @distributed_trace
+ def list_handles(self, recursive=False, **kwargs):
+ # type: (bool, Any) -> AsyncItemPaged
+ """Lists opened handles on a directory or a file under the directory.
+
+ :param bool recursive:
+ Boolean that specifies if operation should apply to the directory specified by the client,
+ its files, its subdirectories and their files. Default value is False.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: An auto-paging iterable of Handle
+ :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle]
+ """
+ timeout = kwargs.pop('timeout', None)
+ results_per_page = kwargs.pop('results_per_page', None)
+ command = functools.partial(
+ self._client.directory.list_handles,
+ sharesnapshot=self.snapshot,
+ timeout=timeout,
+ recursive=recursive,
+ **kwargs)
+ return AsyncItemPaged(
+ command, results_per_page=results_per_page,
+ page_iterator_class=HandlesPaged)
+
+ @distributed_trace_async
+ async def exists(self, **kwargs):
+ # type: (**Any) -> bool
+ """
+ Returns True if a directory exists and returns False otherwise.
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: True if the directory exists, False otherwise.
+ :rtype: bool
+ """
+ try:
+ await self._client.directory.get_properties(**kwargs)
+ return True
+ except HttpResponseError as error:
+ try:
+ process_storage_error(error)
+ except ResourceNotFoundError:
+ return False
+
+ @distributed_trace_async
+ async def close_handle(self, handle, **kwargs):
+ # type: (Union[str, Handle], Any) -> Dict[str, int]
+ """Close an open file handle.
+
+ :param handle:
+ A specific handle to close.
+ :type handle: str or ~azure.storage.fileshare.Handle
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+ and the number of handles failed to close in a dict.
+ :rtype: dict[str, int]
+ """
+ try:
+ handle_id = handle.id # type: ignore
+ except AttributeError:
+ handle_id = handle
+ if handle_id == '*':
+ raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.")
+ try:
+ response = await self._client.directory.force_close_handles(
+ handle_id,
+ marker=None,
+ recursive=None,
+ sharesnapshot=self.snapshot,
+ cls=return_response_headers,
+ **kwargs
+ )
+ return {
+ 'closed_handles_count': response.get('number_of_handles_closed', 0),
+ 'failed_handles_count': response.get('number_of_handles_failed', 0)
+ }
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def close_all_handles(self, recursive=False, **kwargs):
+ # type: (bool, Any) -> Dict[str, int]
+ """Close any open file handles.
+
+ This operation will block until the service has closed all open handles.
+
+ :param bool recursive:
+ Boolean that specifies if operation should apply to the directory specified by the client,
+ its files, its subdirectories and their files. Default value is False.
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: The number of handles closed (this may be 0 if the specified handle was not found)
+ and the number of handles failed to close in a dict.
+ :rtype: dict[str, int]
+ """
+ timeout = kwargs.pop('timeout', None)
+ start_time = time.time()
+
+ try_close = True
+ continuation_token = None
+ total_closed = 0
+ total_failed = 0
+ while try_close:
+ try:
+ response = await self._client.directory.force_close_handles(
+ handle_id='*',
+ timeout=timeout,
+ marker=continuation_token,
+ recursive=recursive,
+ sharesnapshot=self.snapshot,
+ cls=return_response_headers,
+ **kwargs
+ )
+ except HttpResponseError as error:
+ process_storage_error(error)
+ continuation_token = response.get('marker')
+ try_close = bool(continuation_token)
+ total_closed += response.get('number_of_handles_closed', 0)
+ total_failed += response.get('number_of_handles_failed', 0)
+ if timeout:
+ timeout = max(0, timeout - (time.time() - start_time))
+ return {
+ 'closed_handles_count': total_closed,
+ 'failed_handles_count': total_failed
+ }
+
+ @distributed_trace_async
+ async def get_directory_properties(self, **kwargs):
+ # type: (Any) -> DirectoryProperties
+ """Returns all user-defined metadata and system properties for the
+ specified directory. The data returned does not include the directory's
+ list of files.
+
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: DirectoryProperties
+ :rtype: ~azure.storage.fileshare.DirectoryProperties
+ """
+ timeout = kwargs.pop('timeout', None)
+ try:
+ response = await self._client.directory.get_properties(
+ timeout=timeout,
+ cls=deserialize_directory_properties,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+ return response # type: ignore
+
+ @distributed_trace_async
+ async def set_directory_metadata(self, metadata, **kwargs):
+ # type: (Dict[str, Any], Any) -> Dict[str, Any]
+ """Sets the metadata for the directory.
+
+ Each call to this operation replaces all existing metadata
+ attached to the directory. To remove all metadata from the directory,
+ call this operation with an empty metadata dict.
+
+ :param metadata:
+ Name-value pairs associated with the directory as metadata.
+ :type metadata: dict(str, str)
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: Directory-updated property dict (Etag and last modified).
+ :rtype: dict(str, Any)
+ """
+ timeout = kwargs.pop('timeout', None)
+ headers = kwargs.pop('headers', {})
+ headers.update(add_metadata_headers(metadata))
+ try:
+ return await self._client.directory.set_metadata( # type: ignore
+ timeout=timeout,
+ cls=return_response_headers,
+ headers=headers,
+ **kwargs)
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ @distributed_trace_async
+ async def set_http_headers(self, file_attributes="none", # type: Union[str, NTFSAttributes]
+ file_creation_time="preserve", # type: Optional[Union[str, datetime]]
+ file_last_write_time="preserve", # type: Optional[Union[str, datetime]]
+ file_permission=None, # type: Optional[str]
+ permission_key=None, # type: Optional[str]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Dict[str, Any]
+ """Sets HTTP headers on the directory.
+
+ :param file_attributes:
+ The file system attributes for files and directories.
+ If not set, indicates preservation of existing values.
+ Here is an example for when the var type is str: 'Temporary|Archive'
+ :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes`
+ :param file_creation_time: Creation time for the file
+ Default value: Preserve.
+ :type file_creation_time: str or datetime
+ :param file_last_write_time: Last write time for the file
+ Default value: Preserve.
+ :type file_last_write_time: str or datetime
+ :param file_permission: If specified the permission (security
+ descriptor) shall be set for the directory/file. This header can be
+ used if Permission size is <= 8KB, else x-ms-file-permission-key
+ header shall be used. Default value: Inherit. If SDDL is specified as
+ input, it must have owner, group and dacl. Note: Only one of the
+ x-ms-file-permission or x-ms-file-permission-key should be specified.
+ :type file_permission: str
+ :param permission_key: Key of the permission to be set for the
+ directory/file. Note: Only one of the x-ms-file-permission or
+ x-ms-file-permission-key should be specified.
+ :type permission_key: str
+ :keyword file_change_time:
+ Change time for the directory. If not specified, change time will be set to the current date/time.
+
+ .. versionadded:: 12.8.0
+ This parameter was introduced in API version '2021-06-08'.
+
+ :paramtype file_change_time: str or ~datetime.datetime
+ :keyword int timeout:
+ Sets the server-side timeout for the operation in seconds. For more details see
+ https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+ This value is not tracked or validated on the client. To configure client-side network timeouts
+ see `here `_.
+ :returns: File-updated property dict (Etag and last modified).
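The `close_all_handles` implementation above loops over continuation markers until the service reports no more pages, which is why it can block; `close_handle` closes exactly one handle. A hedged end-to-end sketch (connection details are placeholders):

```python
# Minimal sketch: enumerate and force-close open handles on a directory tree;
# endpoint, share, and SAS token are placeholders.
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import (
    ShareDirectoryClient,
)

async def close_leaked_handles() -> None:
    directory = ShareDirectoryClient(
        account_url="https://mystorageaccount.file.core.windows.net",  # placeholder
        share_name="myshare",                                          # placeholder
        directory_path="artifacts",
        credential="<sas-token>",                                      # placeholder
    )
    # list_handles returns an AsyncItemPaged of Handle models.
    async for handle in directory.list_handles(recursive=True):
        print(handle.id, handle.path)
    # close_all_handles pages through continuation markers internally.
    summary = await directory.close_all_handles(recursive=True)
    print(summary["closed_handles_count"], summary["failed_handles_count"])
    await directory.close()

asyncio.run(close_leaked_handles())
```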
+ :rtype: dict(str, Any) + """ + timeout = kwargs.pop('timeout', None) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + file_change_time = kwargs.pop('file_change_time', None) + try: + return await self._client.directory.set_properties( # type: ignore + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=permission_key, + timeout=timeout, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> ShareDirectoryClient + """Creates a new subdirectory and returns a client to interact + with the subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword dict(str,str) metadata: + Name-value pairs associated with the subdirectory as metadata. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: ShareDirectoryClient + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START create_subdirectory] + :end-before: [END create_subdirectory] + :language: python + :dedent: 16 + :caption: Create a subdirectory. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.create_directory(metadata=metadata, timeout=timeout, **kwargs) + return subdir # type: ignore + + @distributed_trace_async + async def delete_subdirectory( + self, directory_name, # type: str + **kwargs + ): + # type: (...) -> None + """Deletes a subdirectory. + + :param str directory_name: + The name of the subdirectory. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_subdirectory] + :end-before: [END delete_subdirectory] + :language: python + :dedent: 16 + :caption: Delete a subdirectory. + """ + timeout = kwargs.pop('timeout', None) + subdir = self.get_subdirectory_client(directory_name) + await subdir.delete_directory(timeout=timeout, **kwargs) + + @distributed_trace_async + async def upload_file( + self, file_name: str, + data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]], + length: Optional[int] = None, + **kwargs + ) -> ShareFileClient: + """Creates a new file in the directory and returns a ShareFileClient + to interact with the file. + + :param str file_name: + The name of the file. + :param data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. 
+ :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword bool validate_content: + If true, calculates an MD5 hash for each range of the file. The storage + service checks the hash of the content that has arrived with the hash + that was sent. This is primarily valuable for detecting bitflips on + the wire if using http instead of https as https (the default) will + already validate. Note that this MD5 hash is not stored with the + file. + :keyword int max_concurrency: + Maximum number of parallel connections to use. + :keyword progress_hook: + An async callback to track the progress of a long running upload. The signature is + function(current: int, total: Optional[int]) where current is the number of bytes transferred + so far, and total is the size of the blob or None if the size is unknown. + :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]] + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str encoding: + Defaults to UTF-8. + :returns: ShareFileClient + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START upload_file_to_directory] + :end-before: [END upload_file_to_directory] + :language: python + :dedent: 16 + :caption: Upload a file to a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.upload_file( + data, + length=length, + **kwargs) + return file_client + + @distributed_trace_async + async def delete_file( + self, file_name, # type: str + **kwargs # type: Optional[Any] + ): + # type: (...) -> None + """Marks the specified file for deletion. The file is later + deleted during garbage collection. + + :param str file_name: + The name of the file to delete. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_directory_async.py + :start-after: [START delete_file_in_directory] + :end-before: [END delete_file_in_directory] + :language: python + :dedent: 16 + :caption: Delete a file in a directory. + """ + file_client = self.get_file_client(file_name) + await file_client.delete_file(**kwargs) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_download_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_download_async.py new file mode 100644 index 00000000000..9feae5ff993 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_download_async.py @@ -0,0 +1,480 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
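The directory-level `upload_file` and `delete_file` above are thin convenience wrappers: each constructs a `ShareFileClient` via `get_file_client` and delegates to it. A hedged round-trip sketch (placeholders throughout):

```python
# Minimal sketch: upload then delete a file through the async directory
# client; endpoint, share, and SAS token are placeholders.
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import (
    ShareDirectoryClient,
)

async def roundtrip() -> None:
    directory = ShareDirectoryClient(
        account_url="https://mystorageaccount.file.core.windows.net",  # placeholder
        share_name="myshare",                                          # placeholder
        directory_path="artifacts",
        credential="<sas-token>",                                      # placeholder
    )
    data = b"hello fileshare"
    # upload_file returns the ShareFileClient it created and wrote through.
    file_client = await directory.upload_file("hello.txt", data, length=len(data))
    print(file_client.file_name)
    await directory.delete_file("hello.txt")
    await directory.close()

asyncio.run(roundtrip())
```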
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import asyncio
+import sys
+import warnings
+from io import BytesIO
+from itertools import islice
+from typing import AsyncIterator
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+from .._download import _ChunkDownloader
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
+
+
+async def process_content(data):
+ if data is None:
+ raise ValueError("Response cannot be None.")
+
+ try:
+ return data.response.body()
+ except Exception as error:
+ raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+ def __init__(self, **kwargs):
+ super(_AsyncChunkDownloader, self).__init__(**kwargs)
+ self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
+ self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None
+
+ async def process_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
+ length = chunk_end - chunk_start
+ if length > 0:
+ await self._write_to_stream(chunk_data, chunk_start)
+ await self._update_progress(length)
+
+ async def yield_chunk(self, chunk_start):
+ chunk_start, chunk_end = self._calculate_range(chunk_start)
+ return await self._download_chunk(chunk_start, chunk_end - 1)
+
+ async def _update_progress(self, length):
+ if self.progress_lock:
+ async with self.progress_lock: # pylint: disable=not-async-context-manager
+ self.progress_total += length
+ else:
+ self.progress_total += length
+
+ if self.progress_hook:
+ await self.progress_hook(self.progress_total, self.total_size)
+
+ async def _write_to_stream(self, chunk_data, chunk_start):
+ if self.stream_lock:
+ async with self.stream_lock: # pylint: disable=not-async-context-manager
+ self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+ self.stream.write(chunk_data)
+ else:
+ self.stream.write(chunk_data)
+
+ async def _download_chunk(self, chunk_start, chunk_end):
+ range_header, range_validation = validate_and_format_range_headers(
+ chunk_start,
+ chunk_end,
+ check_content_md5=self.validate_content
+ )
+ try:
+ _, response = await self.client.download(
+ range=range_header,
+ range_get_content_md5=range_validation,
+ validate_content=self.validate_content,
+ data_stream_total=self.total_size,
+ download_stream_current=self.progress_total,
+ **self.request_options
+ )
+ if response.properties.etag != self.etag:
+ raise ResourceModifiedError(message="The file has been modified while downloading.")
+ except HttpResponseError as error:
+ process_storage_error(error)
+
+ chunk_data = await process_content(response)
+ return chunk_data
+
+
+class _AsyncChunkIterator(object):
+ """Async iterator for chunks in a file download stream."""
+
+ def __init__(self, size, content, downloader, chunk_size):
+ self.size = size
+ self._chunk_size = chunk_size
+ self._current_content = content
+ self._iter_downloader = downloader
+ self._iter_chunks = None
+ self._complete = (size == 0)
+
+ def __len__(self):
+ return self.size
+
+ def __iter__(self):
+ raise TypeError("Async stream must be iterated asynchronously.")
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ """Iterate through responses."""
+ if self._complete:
+ raise StopAsyncIteration("Download complete")
+ if not self._iter_downloader:
+ # cut the data obtained from initial GET into chunks
+ if len(self._current_content) > self._chunk_size:
+ return self._get_chunk_data()
+ self._complete = True
+ return self._current_content
+
+ if not self._iter_chunks:
+ self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+ # initial GET result still has more than _chunk_size bytes of data
+ if len(self._current_content) >= self._chunk_size:
+ return self._get_chunk_data()
+
+ try:
+ chunk = next(self._iter_chunks)
+ self._current_content += await self._iter_downloader.yield_chunk(chunk)
+ except StopIteration:
+ self._complete = True
+ # it's likely that there is some data left in self._current_content
+ if self._current_content:
+ return self._current_content
+ raise StopAsyncIteration("Download complete")
+
+ return self._get_chunk_data()
+
+ def _get_chunk_data(self):
+ chunk_data = self._current_content[: self._chunk_size]
+ self._current_content = self._current_content[self._chunk_size:]
+ return chunk_data
+
+
+class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
+ """A streaming object to download from Azure Storage.
+
+ :ivar str name:
+ The name of the file being downloaded.
+ :ivar str path:
+ The full path of the file.
+ :ivar str share:
+ The name of the share where the file is.
+ :ivar ~azure.storage.fileshare.FileProperties properties:
+ The properties of the file being downloaded. If only a range of the data is being
+ downloaded, this will be reflected in the properties.
+ :ivar int size:
+ The size of the total data in the stream. This will be the byte range if specified,
+ otherwise the total size of the file.
+ """
+
+ def __init__(
+ self,
+ client=None,
+ config=None,
+ start_range=None,
+ end_range=None,
+ validate_content=None,
+ max_concurrency=1,
+ name=None,
+ path=None,
+ share=None,
+ encoding=None,
+ **kwargs
+ ):
+ self.name = name
+ self.path = path
+ self.share = share
+ self.properties = None
+ self.size = None
+
+ self._client = client
+ self._config = config
+ self._start_range = start_range
+ self._end_range = end_range
+ self._max_concurrency = max_concurrency
+ self._encoding = encoding
+ self._validate_content = validate_content
+ self._progress_hook = kwargs.pop('progress_hook', None)
+ self._request_options = kwargs
+ self._location_mode = None
+ self._download_complete = False
+ self._current_content = None
+ self._file_size = None
+ self._response = None
+ self._etag = None
+
+ # The service only provides transactional MD5s for chunks under 4MB.
+ # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+ # chunk so a transactional MD5 can be retrieved.
+ self._first_get_size = self._config.max_single_get_size if not self._validate_content \ + else self._config.max_chunk_get_size + initial_request_start = self._start_range if self._start_range is not None else 0 + if self._end_range is not None and self._end_range - self._start_range < self._first_get_size: + initial_request_end = self._end_range + else: + initial_request_end = initial_request_start + self._first_get_size - 1 + + self._initial_range = (initial_request_start, initial_request_end) + + def __len__(self): + return self.size + + async def _setup(self): + self._response = await self._initial_request() + self.properties = self._response.properties + self.properties.name = self.name + self.properties.path = self.path + self.properties.share = self.share + + # Set the content length to the download size instead of the size of + # the last range + self.properties.size = self.size + + # Overwrite the content range to the user requested range + self.properties.content_range = 'bytes {0}-{1}/{2}'.format( + self._start_range, + self._end_range, + self._file_size + ) + + # Overwrite the content MD5 as it is the MD5 for the last range instead + # of the stored MD5 + # TODO: Set to the stored MD5 when the service returns this + self.properties.content_md5 = None + + if self.size == 0: + self._current_content = b"" + else: + self._current_content = await process_content(self._response) + + async def _initial_request(self): + range_header, range_validation = validate_and_format_range_headers( + self._initial_range[0], + self._initial_range[1], + start_range_required=False, + end_range_required=False, + check_content_md5=self._validate_content) + + try: + location_mode, response = await self._client.download( + range=range_header, + range_get_content_md5=range_validation, + validate_content=self._validate_content, + data_stream_total=None, + download_stream_current=0, + **self._request_options) + + # Check the location we read from to ensure we use the same one + # for subsequent requests. + self._location_mode = location_mode + + # Parse the total file size and adjust the download size if ranges + # were specified + self._file_size = parse_length_from_content_range(response.properties.content_range) + if self._file_size is None: + raise ValueError("Required Content-Range response header is missing or malformed.") + + if self._end_range is not None: + # Use the length unless it is over the end of the file + self.size = min(self._file_size, self._end_range - self._start_range + 1) + elif self._start_range is not None: + self.size = self._file_size - self._start_range + else: + self.size = self._file_size + + except HttpResponseError as error: + if self._start_range is None and error.response and error.response.status_code == 416: + # Get range will fail on an empty file. If the user did not + # request a range, do a regular get request in order to get + # any properties. + try: + _, response = await self._client.download( + validate_content=self._validate_content, + data_stream_total=0, + download_stream_current=0, + **self._request_options) + except HttpResponseError as error: + process_storage_error(error) + + # Set the download size to empty + self.size = 0 + self._file_size = 0 + else: + process_storage_error(error) + + # If the file is small, the download is complete at this point. + # If file size is large, download the rest of the file in chunks. 
+ if response.properties.size == self.size: + self._download_complete = True + self._etag = response.properties.etag + return response + + def chunks(self): + # type: () -> AsyncIterator[bytes] + """Iterate over chunks in the download stream. + + :rtype: AsyncIterator[bytes] + """ + if self.size == 0 or self._download_complete: + iter_downloader = None + else: + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + iter_downloader = _AsyncChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # Start where the first download ended + end_range=data_end, + stream=None, + parallel=False, + validate_content=self._validate_content, + use_location=self._location_mode, + etag=self._etag, + **self._request_options) + return _AsyncChunkIterator( + size=self.size, + content=self._current_content, + downloader=iter_downloader, + chunk_size=self._config.max_chunk_get_size + ) + + async def readall(self): + # type: () -> bytes + """Download the contents of this file. + + This operation is blocking until all data is downloaded. + :rtype: bytes + """ + stream = BytesIO() + await self.readinto(stream) + data = stream.getvalue() + if self._encoding: + return data.decode(self._encoding) + return data + + async def content_as_bytes(self, max_concurrency=1): + """DEPRECATED: Download the contents of this file. + + This operation is blocking until all data is downloaded. + + This method is deprecated, use func:`readall` instead. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :rtype: bytes + """ + warnings.warn( + "content_as_bytes is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + return await self.readall() + + async def content_as_text(self, max_concurrency=1, encoding="UTF-8"): + """DEPRECATED: Download the contents of this file, and decode as text. + + This operation is blocking until all data is downloaded. + + This method is deprecated, use func:`readall` instead. + + :keyword int max_concurrency: + The number of parallel connections with which to download. + :param str encoding: + Test encoding to decode the downloaded bytes. Default is UTF-8. + :rtype: str + """ + warnings.warn( + "content_as_text is deprecated, use readall instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + self._encoding = encoding + return await self.readall() + + async def readinto(self, stream): + """Download the contents of this file to a stream. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The number of bytes read. + :rtype: int + """ + # the stream must be seekable if parallel download is required + parallel = self._max_concurrency > 1 + if parallel: + error_message = "Target stream handle must be seekable." 
+ if sys.version_info >= (3,) and not stream.seekable(): + raise ValueError(error_message) + + try: + stream.seek(stream.tell()) + except (NotImplementedError, AttributeError): + raise ValueError(error_message) + + # Write the content to the user stream + stream.write(self._current_content) + if self._progress_hook: + await self._progress_hook(len(self._current_content), self.size) + + if self._download_complete: + return self.size + + data_end = self._file_size + if self._end_range is not None: + # Use the length unless it is over the end of the file + data_end = min(self._file_size, self._end_range + 1) + + downloader = _AsyncChunkDownloader( + client=self._client, + total_size=self.size, + chunk_size=self._config.max_chunk_get_size, + current_progress=self._first_get_size, + start_range=self._initial_range[1] + 1, # start where the first download ended + end_range=data_end, + stream=stream, + parallel=parallel, + validate_content=self._validate_content, + use_location=self._location_mode, + progress_hook=self._progress_hook, + etag=self._etag, + **self._request_options) + + dl_tasks = downloader.get_chunk_offsets() + running_futures = [ + asyncio.ensure_future(downloader.process_chunk(d)) + for d in islice(dl_tasks, 0, self._max_concurrency) + ] + while running_futures: + # Wait for some download to finish before adding a new one + _done, running_futures = await asyncio.wait( + running_futures, return_when=asyncio.FIRST_COMPLETED) + try: + next_chunk = next(dl_tasks) + except StopIteration: + break + else: + running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk))) + + if running_futures: + # Wait for the remaining downloads to finish + await asyncio.wait(running_futures) + return self.size + + async def download_to_stream(self, stream, max_concurrency=1): + """Download the contents of this file to a stream. + + This method is deprecated, use func:`readinto` instead. + + :param stream: + The stream to download to. This can be an open file-handle, + or any writable stream. The stream must be seekable if the download + uses more than one parallel connection. + :returns: The properties of the downloaded file. + :rtype: Any + """ + warnings.warn( + "download_to_stream is deprecated, use readinto instead", + DeprecationWarning + ) + self._max_concurrency = max_concurrency + await self.readinto(stream) + return self.properties diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_file_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_file_client_async.py new file mode 100644 index 00000000000..d584ebe8087 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_file_client_async.py @@ -0,0 +1,1508 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines, invalid-overridden-method, too-many-public-methods +import functools +import sys +import time +import warnings +from datetime import datetime +from io import BytesIO +from typing import ( + Any, AnyStr, AsyncIterable, Dict, IO, Iterable, List, Optional, Tuple, Union, + TYPE_CHECKING +) + +from azure.core.async_paging import AsyncItemPaged +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from .._parser import _datetime_to_str, _get_file_permission +from .._shared.parser import _str +from .._generated.aio import AzureFileStorage +from .._generated.models import FileHTTPHeaders +from .._shared.policies_async import ExponentialRetry +from .._shared.uploads_async import AsyncIterStreamer, FileChunkUploader, IterStreamer, upload_data_chunks +from .._shared.base_client_async import AsyncStorageAccountHostsMixin +from .._shared.request_handlers import add_metadata_headers, get_length +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._deserialize import deserialize_file_properties, deserialize_file_stream, get_file_ranges_result +from .._serialize import ( + get_access_conditions, + get_api_version, + get_dest_access_conditions, + get_rename_smb_properties, + get_smb_properties, + get_source_access_conditions) +from .._file_client import ShareFileClient as ShareFileClientBase +from ._models import HandlesPaged +from ._lease_async import ShareLeaseClient +from ._download_async import StorageStreamDownloader + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from .._models import ContentSettings, FileProperties, Handle, NTFSAttributes + + +async def _upload_file_helper( + client, + stream, + size, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + file_settings, + file_attributes="none", + file_creation_time="now", + file_last_write_time="now", + file_permission=None, + file_permission_key=None, + progress_hook=None, + **kwargs +): + try: + if size is None or size < 0: + raise ValueError("A content size must be specified for a File.") + response = await client.create_file( + size, content_settings=content_settings, metadata=metadata, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + permission_key=file_permission_key, + timeout=timeout, + **kwargs + ) + if size == 0: + return response + + responses = await upload_data_chunks( + service=client, + uploader_class=FileChunkUploader, + total_size=size, + chunk_size=file_settings.max_range_size, + stream=stream, + max_concurrency=max_concurrency, + validate_content=validate_content, + progress_hook=progress_hook, + timeout=timeout, + **kwargs + ) + return sorted(responses, key=lambda r: r.get('last_modified'))[-1] + except HttpResponseError as error: + process_storage_error(error) + + +class ShareFileClient(AsyncStorageAccountHostsMixin, ShareFileClientBase): + """A client to interact with a specific file, although that file may not yet exist. 
+ + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the + file, use the :func:`from_file_url` classmethod. + :param share_name: + The name of the share for the file. + :type share_name: str + :param str file_path: + The file path to the file with which to interact. If specified, this value will override + a file value specified in the file URL. + :param str snapshot: + An optional file snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`ShareClient.create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of a AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredentials class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. 
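For orientation, a construction sketch for the client documented above. The vendored import path is inferred from this diff's file layout, the account URL, share, path, and SAS token are placeholders, and the async-context-manager usage assumes the client follows the upstream SDK's pattern:

import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import (
    ShareFileClient,
)

async def main() -> None:
    client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",
        share_name="myshare",
        file_path="mydir/myfile.txt",
        credential="<sas-token>",  # placeholder; a shared key or token credential also works
    )
    async with client:  # closes the underlying transport on exit
        props = await client.get_file_properties()
        print(props.size)

asyncio.run(main())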
+ """ + def __init__( + self, account_url: str, + share_name: str, + file_path: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + kwargs["retry_policy"] = kwargs.get("retry_policy") or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + if loop and sys.version_info >= (3, 8): + warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level" + "APIs in Python 3.8 and is no longer supported.", DeprecationWarning) + super(ShareFileClient, self).__init__( + account_url, share_name=share_name, file_path=file_path, snapshot=snapshot, + credential=credential, **kwargs + ) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + @distributed_trace_async + async def acquire_lease(self, lease_id=None, **kwargs): + # type: (Optional[str], **Any) -> ShareLeaseClient + """Requests a new lease. + + If the file does not have an active lease, the File + Service creates a lease on the blob and returns a new lease. + + :param str lease_id: + Proposed lease ID, in a GUID string format. The File Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.aio.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/blob_samples_common.py + :start-after: [START acquire_lease_on_blob] + :end-before: [END acquire_lease_on_blob] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a blob. + """ + kwargs['lease_duration'] = -1 + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(**kwargs) + return lease + + @distributed_trace_async + async def create_file( # type: ignore + self, + size, # type: int + file_attributes="none", # type: Union[str, NTFSAttributes] + file_creation_time="now", # type: Optional[Union[str, datetime]] + file_last_write_time="now", # type: Optional[Union[str, datetime]] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Creates a new file. + + Note that it only initializes the file with no content. + + :param int size: Specifies the maximum size for the file, + up to 1 TB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. 
+ file_attributes value is not case sensitive. + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :keyword dict(str,str) metadata: + Name-value pairs associated with the file as metadata. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START create_file] + :end-before: [END create_file] + :language: python + :dedent: 16 + :caption: Create a file. 
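A hedged sketch of the call documented here, assuming `client` is a connected ShareFileClient and that the vendored package re-exports ContentSettings at this path as the upstream library does:

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import (
    ContentSettings,
)

async def create_empty_file(client) -> dict:
    # Allocates (but does not write) a 1 KiB file; content is uploaded separately.
    return await client.create_file(
        size=1024,
        content_settings=ContentSettings(content_type="text/plain"),
        metadata={"origin": "example"},
    )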
+ """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + content_settings = kwargs.pop('content_settings', None) + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + file_http_headers = None + if content_settings: + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'Inherit') + file_change_time = kwargs.pop('file_change_time', None) + try: + return await self._client.file.create( # type: ignore + file_content_length=size, + metadata=metadata, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=permission_key, + file_http_headers=file_http_headers, + lease_access_conditions=access_conditions, + headers=headers, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_file( + self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]], + length: Optional[int] = None, + file_attributes: Union[str, "NTFSAttributes"] = "none", + file_creation_time: Optional[Union[str, datetime]] = "now", + file_last_write_time: Optional[Union[str, datetime]] = "now", + file_permission: Optional[str] = None, + permission_key: Optional[str] = None, + **kwargs + ) -> Dict[str, Any]: + """Uploads a new file. + + :param data: + Content of the file. + :param int length: + Length of the file in bytes. Specify its maximum size, up to 1 TiB. + :param file_attributes: + The file system attributes for files and directories. + If not set, the default value would be "None" and the attributes will be set to "Archive". + Here is an example for when the var type is str: 'Temporary|Archive'. + file_attributes value is not case sensitive. + :type file_attributes: str or ~azure.storage.fileshare.NTFSAttributes + :param file_creation_time: Creation time for the file + Default value: Now. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Now. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. 
If not specified, change time will be set to the current date/time.
+
+            .. versionadded:: 12.8.0
+                This parameter was introduced in API version '2021-06-08'.
+
+        :paramtype file_change_time: str or ~datetime.datetime
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the file as metadata.
+        :keyword ~azure.storage.fileshare.ContentSettings content_settings:
+            ContentSettings object used to set file properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each range of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: File-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_client_async.py
+                :start-after: [START upload_file]
+                :end-before: [END upload_file]
+                :language: python
+                :dedent: 16
+                :caption: Upload a file.
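A usage sketch for upload_file as documented above, assuming `client` is a connected ShareFileClient; the payload size is arbitrary, and the hook signature follows the progress_hook docstring:

from typing import Optional

async def report_progress(current: int, total: Optional[int]) -> None:
    print(f"uploaded {current} of {total} bytes")

async def upload_bytes(client) -> dict:
    data = b"x" * (8 * 1024 * 1024)  # larger than one range, so the upload is chunked
    return await client.upload_file(
        data,
        length=len(data),
        max_concurrency=2,
        progress_hook=report_progress,
    )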
+ """ + metadata = kwargs.pop('metadata', None) + content_settings = kwargs.pop('content_settings', None) + max_concurrency = kwargs.pop('max_concurrency', 1) + validate_content = kwargs.pop('validate_content', False) + progress_hook = kwargs.pop('progress_hook', None) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + + if isinstance(data, str): + data = data.encode(encoding) + if length is None: + length = get_length(data) + if isinstance(data, bytes): + data = data[:length] + + if isinstance(data, bytes): + stream = BytesIO(data) + elif hasattr(data, "read"): + stream = data + elif hasattr(data, "__iter__"): + stream = IterStreamer(data, encoding=encoding) + elif hasattr(data, '__aiter__'): + stream = AsyncIterStreamer(data, encoding=encoding) + else: + raise TypeError("Unsupported data type: {}".format(type(data))) + return await _upload_file_helper( + self, + stream, + length, + metadata, + content_settings, + validate_content, + timeout, + max_concurrency, + self._config, + file_attributes=file_attributes, + file_creation_time=file_creation_time, + file_last_write_time=file_last_write_time, + file_permission=file_permission, + file_permission_key=permission_key, + progress_hook=progress_hook, + **kwargs + ) + + @distributed_trace_async + async def start_copy_from_url(self, source_url, **kwargs): + # type: (str, Any) -> Any + """Initiates the copying of data from a source URL into the file + referenced by the client. + + The status of this copy operation can be found using the `get_properties` + method. + + :param str source_url: + Specifies the URL of the source file. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. This setting can be + used if Permission size is <= 8KB, otherwise permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword str permission_key: + Key of the permission to be set for the directory/file. + This value can be set to "source" to copy the security descriptor from the source file. + Otherwise if set, this value will be used to override the source value. If not set, permission value + is inherited from the parent directory of the target file. + Note: Only one of the file_permission or permission_key should be specified. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword file_attributes: + This value can be set to "source" to copy file attributes from the source file to the target file, + or to clear all attributes, it can be set to "None". Otherwise it can be set to a list of attributes + to set on the target file. If this is not set, the default value is "Archive". + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :keyword file_creation_time: + This value can be set to "source" to copy the creation time from the source file to the target file, + or a datetime to set as creation time on the target file. 
This could also be a string in ISO 8601 format. + If this is not set, creation time will be set to the date time value of the creation + (or when it was overwritten) of the target file by copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_creation_time: str or ~datetime.datetime + :keyword file_last_write_time: + This value can be set to "source" to copy the last write time from the source file to the target file, or + a datetime to set as the last write time on the target file. This could also be a string in ISO 8601 format. + If this is not set, value will be the last write time to the file by the copy engine. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :paramtype file_last_write_time: str or ~datetime.datetime + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.9.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword bool ignore_read_only: + Specifies the option to overwrite the target file if it already exists and has read-only attribute set. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword bool set_archive_attribute: + Specifies the option to set the archive attribute on the target file. + True means the archive attribute will be set on the target file despite attribute + overrides or the source file state. + + .. versionadded:: 12.1.0 + This parameter was introduced in API version '2019-07-07'. + + :keyword metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START copy_file_from_url] + :end-before: [END copy_file_from_url] + :language: python + :dedent: 16 + :caption: Copy a file from a URL + """ + metadata = kwargs.pop('metadata', None) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) + kwargs.update(get_smb_properties(kwargs)) + try: + return await self._client.file.start_copy( + source_url, + metadata=metadata, + lease_access_conditions=access_conditions, + headers=headers, + cls=return_response_headers, + timeout=timeout, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def abort_copy(self, copy_id, **kwargs): + # type: (Union[str, FileProperties], Any) -> None + """Abort an ongoing copy operation. + + This will leave a destination file with zero length and full metadata. + This will raise an error if the copy operation has already ended. + + :param copy_id: + The copy operation to abort. 
This can be either an ID, or an
+            instance of FileProperties.
+        :type copy_id: str or ~azure.storage.fileshare.FileProperties
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            copy_id = copy_id.copy.id
+        except AttributeError:
+            try:
+                copy_id = copy_id["copy_id"]
+            except TypeError:
+                pass
+        try:
+            await self._client.file.abort_copy(copy_id=copy_id,
+                                               lease_access_conditions=access_conditions,
+                                               timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def download_file(
+        self,
+        offset=None,  # type: Optional[int]
+        length=None,  # type: Optional[int]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> StorageStreamDownloader
+        """Downloads a file to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the file into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate
+        over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the file.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the file. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            file. Also note that if enabled, the memory-efficient algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the size of the file or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
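The method body that follows converts (offset, length) into the service's inclusive end-range index. A ranged-read sketch, assuming `client` is a connected ShareFileClient:

async def download_slice(client, offset: int, length: int) -> bytes:
    # Internally the client requests bytes offset..(offset + length - 1), inclusive.
    downloader = await client.download_file(offset=offset, length=length)
    return await downloader.readall()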
+ :returns: A streaming object (StorageStreamDownloader) + :rtype: ~azure.storage.fileshare.aio.StorageStreamDownloader + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START download_file] + :end-before: [END download_file] + :language: python + :dedent: 16 + :caption: Download a file. + """ + if length is not None and offset is None: + raise ValueError("Offset value must not be None if length is set.") + + range_end = None + if length is not None: + range_end = offset + length - 1 # Service actually uses an end-range inclusive index + + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + + downloader = StorageStreamDownloader( + client=self._client.file, + config=self._config, + start_range=offset, + end_range=range_end, + name=self.file_name, + path='/'.join(self.file_path), + share=self.share_name, + lease_access_conditions=access_conditions, + cls=deserialize_file_stream, + **kwargs + ) + await downloader._setup() # pylint: disable=protected-access + return downloader + + @distributed_trace_async + async def delete_file(self, **kwargs): + # type: (Any) -> None + """Marks the specified file for deletion. The file is + later deleted during garbage collection. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_client_async.py + :start-after: [START delete_file] + :end-before: [END delete_file] + :language: python + :dedent: 16 + :caption: Delete a file. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + await self._client.file.delete(lease_access_conditions=access_conditions, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def rename_file( + self, new_name, # type: str + **kwargs # type: Any + ): + # type: (...) -> ShareFileClient + """ + Rename the source file. + + :param str new_name: + The new file name. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword bool overwrite: + A boolean value for if the destination file already exists, whether this request will + overwrite the file or not. If true, the rename will succeed and will overwrite the + destination file. If not provided or if false and the destination file does exist, the + request will not overwrite the destination file. If provided and the destination file + doesn't exist, the rename will succeed. + :keyword bool ignore_read_only: + A boolean value that specifies whether the ReadOnly attribute on a preexisting destination + file should be respected. 
If true, the rename will succeed, otherwise, a previous file at the + destination with the ReadOnly attribute set will cause the rename to fail. + :keyword str file_permission: + If specified the permission (security descriptor) shall be set for the file. This header + can be used if Permission size is <= 8KB, else file_permission_key shall be used. + If SDDL is specified as input, it must have owner, group and dacl. + A value of 'preserve' can be passed to preserve source permissions. + Note: Only one of the file_permission or file_permission_key should be specified. + :keyword str file_permission_key: + Key of the permission to be set for the file. + Note: Only one of the file-permission or file-permission-key should be specified. + :keyword file_attributes: + The file system attributes for the file. + :paramtype file_attributes:~azure.storage.fileshare.NTFSAttributes or str + :keyword file_creation_time: + Creation time for the file. + :paramtype file_creation_time:~datetime.datetime or str + :keyword file_last_write_time: + Last write time for the file. + :paramtype file_last_write_time:~datetime.datetime or str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword str content_type: + The Content Type of the new file. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :keyword Dict[str,str] metadata: + A name-value pair to associate with a file storage object. + :keyword source_lease: + Required if the source file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype source_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :keyword destination_lease: + Required if the destination file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :paramtype destination_lease: ~azure.storage.fileshare.ShareLeaseClient or str + :returns: The new File Client. 
+ :rtype: ~azure.storage.fileshare.ShareFileClient + """ + if not new_name: + raise ValueError("Please specify a new file name.") + + new_name = new_name.strip('/') + new_path_and_query = new_name.split('?') + new_file_path = new_path_and_query[0] + if len(new_path_and_query) == 2: + new_file_sas = new_path_and_query[1] or self._query_str.strip('?') + else: + new_file_sas = self._query_str.strip('?') + + new_file_client = ShareFileClient( + '{}://{}'.format(self.scheme, self.primary_hostname), self.share_name, new_file_path, + credential=new_file_sas or self.credential, api_version=self.api_version, + _hosts=self._hosts, _configuration=self._config, _pipeline=self._pipeline, + _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent + ) + + kwargs.update(get_rename_smb_properties(kwargs)) + + file_http_headers = None + content_type = kwargs.pop('content_type', None) + if content_type: + file_http_headers = FileHTTPHeaders( + file_content_type=content_type + ) + + timeout = kwargs.pop('timeout', None) + overwrite = kwargs.pop('overwrite', None) + metadata = kwargs.pop('metadata', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + + source_access_conditions = get_source_access_conditions(kwargs.pop('source_lease', None)) + dest_access_conditions = get_dest_access_conditions(kwargs.pop('destination_lease', None)) + + try: + await new_file_client._client.file.rename( # pylint: disable=protected-access + self.url, + timeout=timeout, + replace_if_exists=overwrite, + file_http_headers=file_http_headers, + source_lease_access_conditions=source_access_conditions, + destination_lease_access_conditions=dest_access_conditions, + headers=headers, + **kwargs) + + return new_file_client + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_file_properties(self, **kwargs): + # type: (Any) -> FileProperties + """Returns all user-defined metadata, standard HTTP properties, and + system properties for the file. + + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. 
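A short sketch of reading properties back, assuming `client` is a connected ShareFileClient; note that the name, share, and path fields are populated client-side after the service call, as the implementation below shows:

async def show_properties(client) -> None:
    props = await client.get_file_properties()
    print(props.name, props.share, props.path, props.size)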
+ :returns: FileProperties + :rtype: ~azure.storage.fileshare.FileProperties + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + file_props = await self._client.file.get_properties( + sharesnapshot=self.snapshot, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=deserialize_file_properties, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + file_props.name = self.file_name + file_props.share = self.share_name + file_props.snapshot = self.snapshot + file_props.path = "/".join(self.file_path) + return file_props # type: ignore + + @distributed_trace_async + async def set_http_headers(self, content_settings, # type: ContentSettings + file_attributes="preserve", # type: Union[str, NTFSAttributes] + file_creation_time="preserve", # type: Optional[Union[str, datetime]] + file_last_write_time="preserve", # type: Optional[Union[str, datetime]] + file_permission=None, # type: Optional[str] + permission_key=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Dict[str, Any] + """Sets HTTP headers on the file. + + :param ~azure.storage.fileshare.ContentSettings content_settings: + ContentSettings object used to set file properties. Used to set content type, encoding, + language, disposition, md5, and cache control. + :param file_attributes: + The file system attributes for files and directories. + If not set, indicates preservation of existing values. + Here is an example for when the var type is str: 'Temporary|Archive' + :type file_attributes: str or :class:`~azure.storage.fileshare.NTFSAttributes` + :param file_creation_time: Creation time for the file + Default value: Preserve. + :type file_creation_time: str or ~datetime.datetime + :param file_last_write_time: Last write time for the file + Default value: Preserve. + :type file_last_write_time: str or ~datetime.datetime + :param file_permission: If specified the permission (security + descriptor) shall be set for the directory/file. This header can be + used if Permission size is <= 8KB, else x-ms-file-permission-key + header shall be used. Default value: Inherit. If SDDL is specified as + input, it must have owner, group and dacl. Note: Only one of the + x-ms-file-permission or x-ms-file-permission-key should be specified. + :type file_permission: str + :param permission_key: Key of the permission to be set for the + directory/file. Note: Only one of the x-ms-file-permission or + x-ms-file-permission-key should be specified. + :type permission_key: str + :keyword file_change_time: + Change time for the file. If not specified, change time will be set to the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_change_time: str or ~datetime.datetime + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). 
+ :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + file_content_length = kwargs.pop("size", None) + file_http_headers = FileHTTPHeaders( + file_cache_control=content_settings.cache_control, + file_content_type=content_settings.content_type, + file_content_md5=bytearray(content_settings.content_md5) if content_settings.content_md5 else None, + file_content_encoding=content_settings.content_encoding, + file_content_language=content_settings.content_language, + file_content_disposition=content_settings.content_disposition, + ) + file_permission = _get_file_permission(file_permission, permission_key, 'preserve') + file_change_time = kwargs.pop('file_change_time', None) + try: + return await self._client.file.set_http_headers( # type: ignore + file_content_length=file_content_length, + file_http_headers=file_http_headers, + file_attributes=_str(file_attributes), + file_creation_time=_datetime_to_str(file_creation_time), + file_last_write_time=_datetime_to_str(file_last_write_time), + file_change_time=_datetime_to_str(file_change_time), + file_permission=file_permission, + file_permission_key=permission_key, + lease_access_conditions=access_conditions, + timeout=timeout, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_file_metadata(self, metadata=None, **kwargs): # type: ignore + # type: (Optional[Dict[str, Any]], Any) -> Dict[str, Any] + """Sets user-defined metadata for the specified file as one or more + name-value pairs. + + Each call to this operation replaces all existing metadata + attached to the file. To remove all metadata from the file, + call this operation with no metadata dict. + + :param metadata: + Name-value pairs associated with the file as metadata. + :type metadata: dict(str, str) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop("headers", {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.file.set_metadata( # type: ignore + metadata=metadata, lease_access_conditions=access_conditions, + timeout=timeout, cls=return_response_headers, headers=headers, **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_range( # type: ignore + self, + data, # type: bytes + offset, # type: int + length, # type: int + **kwargs + ): + # type: (...) -> Dict[str, Any] + """Upload a range of bytes to a file. + + :param bytes data: + The data to upload. + :param int offset: + Start of byte range to use for uploading a section of the file. + The range can be up to 4 MB in size. 
+ :param int length: + Number of bytes to use for uploading a section of the file. + The range can be up to 4 MB in size. + :keyword bool validate_content: + If true, calculates an MD5 hash of the page content. The storage + service checks the hash of the content that has arrived + with the hash that was sent. This is primarily valuable for detecting + bitflips on the wire if using http instead of https as https (the default) + will already validate. Note that this MD5 hash is not stored with the + file. + :keyword file_last_write_mode: + If the file last write time should be preserved or overwritten. Possible values + are "preserve" or "now". If not specified, file last write time will be changed to + the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_last_write_mode: Literal["preserve", "now"] + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str encoding: + Defaults to UTF-8. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + validate_content = kwargs.pop('validate_content', False) + timeout = kwargs.pop('timeout', None) + encoding = kwargs.pop('encoding', 'UTF-8') + file_last_write_mode = kwargs.pop('file_last_write_mode', None) + if isinstance(data, str): + data = data.encode(encoding) + end_range = offset + length - 1 # Reformat to an inclusive range index + content_range = 'bytes={0}-{1}'.format(offset, end_range) + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + try: + return await self._client.file.upload_range( # type: ignore + range=content_range, + content_length=length, + optionalbody=data, + timeout=timeout, + validate_content=validate_content, + file_last_written_mode=file_last_write_mode, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def upload_range_from_url(self, source_url, + offset, + length, + source_offset, + **kwargs + ): + # type: (str, int, int, int, **Any) -> Dict[str, Any] + """ + Writes the bytes from one Azure File endpoint into the specified range of another Azure File endpoint. + + :param int offset: + Start of byte range to use for updating a section of the file. + The range can be up to 4 MB in size. + :param int length: + Number of bytes to use for updating a section of the file. + The range can be up to 4 MB in size. + :param str source_url: + A URL of up to 2 KB in length that specifies an Azure file or blob. + The value should be URL-encoded as it would appear in a request URI. + If the source is in another account, the source must either be public + or must be authenticated via a shared access signature. If the source + is public, no authentication is required. 
+ Examples: + https://myaccount.file.core.windows.net/myshare/mydir/myfile + https://otheraccount.file.core.windows.net/myshare/mydir/myfile?sastoken + :param int source_offset: + This indicates the start of the range of bytes(inclusive) that has to be taken from the copy source. + The service will read the same number of bytes as the destination range (length-offset). + :keyword ~datetime.datetime source_if_modified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source + blob has been modified since the specified date/time. + :keyword ~datetime.datetime source_if_unmodified_since: + A DateTime value. Azure expects the date value passed in to be UTC. + If timezone is included, any non-UTC datetimes will be converted to UTC. + If a date is passed in without timezone info, it is assumed to be UTC. + Specify this conditional header to copy the blob only if the source blob + has not been modified since the specified date/time. + :keyword str source_etag: + The source ETag value, or the wildcard character (*). Used to check if the resource has changed, + and act according to the condition specified by the `match_condition` parameter. + :keyword ~azure.core.MatchConditions source_match_condition: + The source match condition to use upon the etag. + :keyword file_last_write_mode: + If the file last write time should be preserved or overwritten. Possible values + are "preserve" or "now". If not specified, file last write time will be changed to + the current date/time. + + .. versionadded:: 12.8.0 + This parameter was introduced in API version '2021-06-08'. + + :paramtype file_last_write_mode: Literal["preserve", "now"] + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :keyword str source_authorization: + Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is + the prefix of the source_authorization string. + """ + options = self._upload_range_from_url_options( + source_url=source_url, + offset=offset, + length=length, + source_offset=source_offset, + **kwargs + ) + try: + return await self._client.file.upload_range_from_url(**options) # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_ranges( # type: ignore + self, offset=None, # type: Optional[int] + length=None, # type: Optional[int] + **kwargs # type: Any + ): + # type: (...) -> List[Dict[str, int]] + """Returns the list of valid page ranges for a file or snapshot + of a file. + + :param int offset: + Specifies the start offset of bytes over which to get ranges. + :param int length: + Number of bytes to use over which to get ranges. + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. 
versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns:
+            A list of valid ranges.
+        :rtype: List[dict[str, int]]
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return [{'start': file_range.start, 'end': file_range.end} for file_range in ranges.ranges]
+
+    @distributed_trace_async
+    async def get_ranges_diff(  # type: ignore
+        self,
+        previous_sharesnapshot,  # type: Union[str, Dict[str, Any]]
+        offset=None,  # type: Optional[int]
+        length=None,  # type: Optional[int]
+        **kwargs  # type: Any
+    ):
+        # type: (...) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
+        """Returns the list of valid file ranges for a file or snapshot
+        of a file.
+
+        .. versionadded:: 12.6.0
+
+        :param int offset:
+            Specifies the start offset of bytes over which to get ranges.
+        :param int length:
+            Number of bytes to use over which to get ranges.
+        :param str previous_sharesnapshot:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous file snapshot to be compared
+            against a more recent snapshot or the current file.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.fileshare.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns:
+            A tuple of two lists of file ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the list of filled file ranges, the second is the list of cleared file ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = self._get_ranges_options(
+            offset=offset,
+            length=length,
+            previous_sharesnapshot=previous_sharesnapshot,
+            **kwargs)
+        try:
+            ranges = await self._client.file.get_range_list(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_file_ranges_result(ranges)
+
+    @distributed_trace_async
+    async def clear_range(  # type: ignore
+        self,
+        offset,  # type: int
+        length,  # type: int
+        **kwargs
+    ):
+        # type: (...) -> Dict[str, Any]
+        """Clears the specified range and releases the space used in storage for
+        that range.
+
+        :param int offset:
+            Start of byte range to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :param int length:
+            Number of bytes to use for clearing a section of the file.
+            The range can be up to 4 MB in size.
+        :keyword lease:
+            Required if the file has an active lease. Value can be a ShareLeaseClient object
+            or the lease ID as a string.
+
+            .. versionadded:: 12.1.0
+
+        :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds.
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + + if offset is None or offset % 512 != 0: + raise ValueError("offset must be an integer that aligns with 512 bytes file size") + if length is None or length % 512 != 0: + raise ValueError("length must be an integer that aligns with 512 bytes file size") + end_range = length + offset - 1 # Reformat to an inclusive range index + content_range = "bytes={0}-{1}".format(offset, end_range) + try: + return await self._client.file.upload_range( # type: ignore + timeout=timeout, + cls=return_response_headers, + content_length=0, + optionalbody=None, + file_range_write="clear", + range=content_range, + lease_access_conditions=access_conditions, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def resize_file(self, size, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Resizes a file to the specified size. + + :param int size: + Size to resize file to (in bytes) + :keyword lease: + Required if the file has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.1.0 + + :paramtype lease: ~azure.storage.fileshare.aio.ShareLeaseClient or str + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: File-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.file.set_http_headers( # type: ignore + file_content_length=size, + file_attributes="preserve", + file_creation_time="preserve", + file_last_write_time="preserve", + file_permission="preserve", + lease_access_conditions=access_conditions, + cls=return_response_headers, + timeout=timeout, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_handles(self, **kwargs): + # type: (Any) -> AsyncItemPaged + """Lists handles for the file. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. 
+ :returns: An auto-paging iterable of Handle + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.Handle] + """ + timeout = kwargs.pop('timeout', None) + results_per_page = kwargs.pop("results_per_page", None) + command = functools.partial( + self._client.file.list_handles, + sharesnapshot=self.snapshot, + timeout=timeout, + **kwargs) + return AsyncItemPaged( + command, results_per_page=results_per_page, + page_iterator_class=HandlesPaged) + + @distributed_trace_async + async def close_handle(self, handle, **kwargs): + # type: (Union[str, Handle], Any) -> Dict[str, int] + """Close an open file handle. + + :param handle: + A specific handle to close. + :type handle: str or ~azure.storage.fileshare.Handle + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: + The number of handles closed (this may be 0 if the specified handle was not found) + and the number of handles that failed to close in a dict. + :rtype: dict[str, int] + """ + try: + handle_id = handle.id # type: ignore + except AttributeError: + handle_id = handle + if handle_id == '*': + raise ValueError("Handle ID '*' is not supported. Use 'close_all_handles' instead.") + try: + response = await self._client.file.force_close_handles( + handle_id, + marker=None, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + return { + 'closed_handles_count': response.get('number_of_handles_closed', 0), + 'failed_handles_count': response.get('number_of_handles_failed', 0) + } + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def close_all_handles(self, **kwargs): + # type: (Any) -> Dict[str, int] + """Close any open file handles. + + This operation will block until the service has closed all open handles. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: + The number of handles closed (this may be 0 if no open handles were found) + and the number of handles that failed to close in a dict. 
+ :rtype: dict[str, int] + """ + timeout = kwargs.pop('timeout', None) + start_time = time.time() + + try_close = True + continuation_token = None + total_closed = 0 + total_failed = 0 + while try_close: + try: + response = await self._client.file.force_close_handles( + handle_id='*', + timeout=timeout, + marker=continuation_token, + sharesnapshot=self.snapshot, + cls=return_response_headers, + **kwargs + ) + except HttpResponseError as error: + process_storage_error(error) + continuation_token = response.get('marker') + try_close = bool(continuation_token) + total_closed += response.get('number_of_handles_closed', 0) + total_failed += response.get('number_of_handles_failed', 0) + if timeout: + timeout = max(0, timeout - (time.time() - start_time)) + return { + 'closed_handles_count': total_closed, + 'failed_handles_count': total_failed + } diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_lease_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_lease_async.py new file mode 100644 index 00000000000..4e8bc167bc2 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_lease_async.py @@ -0,0 +1,248 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +from typing import ( # pylint: disable=unused-import + Union, Optional, Any, IO, Iterable, AnyStr, Dict, List, Tuple, + TypeVar, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator_async import distributed_trace_async + +from .._shared.response_handlers import return_response_headers, process_storage_error +from .._generated.aio.operations import FileOperations, ShareOperations +from .._lease import ShareLeaseClient as LeaseClientBase + +if TYPE_CHECKING: + from datetime import datetime + ShareFileClient = TypeVar("ShareFileClient") + ShareClient = TypeVar("ShareClient") + + +class ShareLeaseClient(LeaseClientBase): + """Creates a new ShareLeaseClient. + + This client provides lease operations on a ShareClient or ShareFileClient. + + :ivar str id: + The ID of the lease currently being maintained. This will be `None` if no + lease has yet been acquired. + :ivar str etag: + The ETag of the lease currently being maintained. This will be `None` if no + lease has yet been acquired or modified. + :ivar ~datetime.datetime last_modified: + The last modified timestamp of the lease currently being maintained. + This will be `None` if no lease has yet been acquired or modified. + + :param client: + The client of the file or share to lease. + :type client: ~azure.storage.fileshare.ShareFileClient or + ~azure.storage.fileshare.ShareClient + :param str lease_id: + A string representing the lease ID of an existing lease. This value does not + need to be specified in order to acquire a new lease, or break one. + """ + + def __enter__(self): + raise TypeError("Async lease must use 'async with'.") + + def __exit__(self, *args): + self.release() + + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + await self.release() + + @distributed_trace_async + async def acquire(self, **kwargs): + # type: (**Any) -> None + """Requests a new lease. 
This operation establishes and manages a lock on a + file or share for write and delete operations. If the file or share does not have an active lease, + the File or Share service creates a lease on the file or share and returns a new lease ID. + If the file has an active lease, you can only request a new lease using the active lease ID. + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. File leases never expire. A non-infinite share lease can be + between 15 and 60 seconds. A share lease duration cannot be changed + using renew or change. Default is -1 (infinite share lease). + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :rtype: None + """ + try: + lease_duration = kwargs.pop('lease_duration', -1) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.acquire_lease( + timeout=kwargs.pop('timeout', None), + duration=lease_duration, + proposed_lease_id=self.id, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + self.etag = response.get('etag') # type: str + + @distributed_trace_async + async def renew(self, **kwargs): + # type: (Any) -> None + """Renews the share lease. + + The share lease can be renewed if the lease ID specified in the + lease client matches that associated with the share. Note that + the lease may be renewed even if it has expired as long as the share + has not been leased again since the expiration of that lease. When you + renew a lease, the lease duration clock resets. + + .. versionadded:: 12.6.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + if isinstance(self._client, FileOperations): + raise TypeError("Lease renewal operations are only valid for ShareClient.") + try: + response = await self._client.renew_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + sharesnapshot=self._snapshot, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def release(self, **kwargs): + # type: (Any) -> None + """Releases the lease. The lease may be released if the lease ID specified on the request matches + that associated with the share or file. Releasing the lease allows another client to immediately acquire + the lease for the share or file as soon as the release is complete. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.release_lease( + lease_id=self.id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def change(self, proposed_lease_id, **kwargs): + # type: (str, Any) -> None + """ Changes the lease ID of an active lease. A change must include the current lease ID in x-ms-lease-id and + a new lease ID in x-ms-proposed-lease-id. + + :param str proposed_lease_id: + Proposed lease ID, in a GUID string format. The File or Share service raises an error + (Invalid request) if the proposed lease ID is not in the correct format. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :return: None + """ + try: + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + response = await self._client.change_lease( + lease_id=self.id, + proposed_lease_id=proposed_lease_id, + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + self.etag = response.get('etag') # type: str + self.id = response.get('lease_id') # type: str + self.last_modified = response.get('last_modified') # type: datetime + + @distributed_trace_async + async def break_lease(self, **kwargs): + # type: (Any) -> int + """Force breaks the lease if the file or share has an active lease. Any authorized request can break the lease; + the request is not required to specify a matching lease ID. An infinite lease breaks immediately. + + Once a lease is broken, it cannot be changed. + When a lease is successfully broken, the response indicates the interval + in seconds until a new lease can be acquired. + + :keyword int lease_break_period: + This is the proposed duration of seconds that the share lease + should continue before it is broken, between 0 and 60 seconds. This + break period is only used if it is shorter than the time remaining + on the share lease. If longer, the time remaining on the share lease is used. + A new share lease will not be available before the break period has + expired, but the share lease may be held for longer than the break + period. If this header does not appear with a break + operation, a fixed-duration share lease breaks after the remaining share lease + period elapses, and an infinite share lease breaks immediately. + + .. versionadded:: 12.5.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. 
To configure client-side network timeouts + see `here `_. + :return: Approximate time remaining in the lease period, in seconds. + :rtype: int + """ + try: + lease_break_period = kwargs.pop('lease_break_period', None) + if self._snapshot: + kwargs['sharesnapshot'] = self._snapshot + if isinstance(self._client, ShareOperations): + kwargs['break_period'] = lease_break_period + if isinstance(self._client, FileOperations) and lease_break_period: + raise TypeError("Setting a lease break period is only applicable to Share leases.") + + response = await self._client.break_lease( + timeout=kwargs.pop('timeout', None), + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return response.get('lease_time') # type: ignore diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_models.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_models.py new file mode 100644 index 00000000000..e943afc8ef3 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_models.py @@ -0,0 +1,179 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-few-public-methods, too-many-instance-attributes +# pylint: disable=super-init-not-called, too-many-lines + +from azure.core.async_paging import AsyncPageIterator +from azure.core.exceptions import HttpResponseError + +from .._shared.response_handlers import return_context_and_deserialized, process_storage_error +from .._generated.models import DirectoryItem +from .._models import Handle, ShareProperties, DirectoryProperties, FileProperties + + +def _wrap_item(item): + if isinstance(item, DirectoryItem): + return {'name': item.name, 'is_directory': True} + return {'name': item.name, 'size': item.properties.content_length, 'is_directory': False} + + +class SharePropertiesPaged(AsyncPageIterator): + """An iterable of Share properties. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A share name prefix being used to filter the list. + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.ShareProperties) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only shares whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. 
+ """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(SharePropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + prefix=self.prefix, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [ShareProperties._from_generated(i) for i in self._response.share_items] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class HandlesPaged(AsyncPageIterator): + """An iterable of Handles. + + :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str marker: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(~azure.storage.fileshare.Handle) + + :param callable command: Function to retrieve the next page of items. + :param int results_per_page: The maximum number of share names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + def __init__(self, command, results_per_page=None, continuation_token=None): + super(HandlesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.current_page = [Handle._from_generated(h) for h in self._response.handle_list] # pylint: disable=protected-access + return self._response.next_marker or None, self.current_page + + +class DirectoryPropertiesPaged(AsyncPageIterator): + """An iterable for the contents of a directory. + + This iterable will yield dicts for the contents of the directory. The dicts + will have the keys 'name' (str) and 'is_directory' (bool). + Items that are files (is_directory=False) will have an additional 'content_length' key. + + :ivar str service_endpoint: The service URL. + :ivar str prefix: A file name prefix being used to filter the list. 
+ :ivar str marker: The continuation token of the current page of results. + :ivar int results_per_page: The maximum number of results retrieved per API call. + :ivar str continuation_token: The continuation token to retrieve the next page of results. + :ivar str location_mode: The location mode being used to list results. The available + options include "primary" and "secondary". + :ivar current_page: The current page of listed results. + :vartype current_page: list(dict(str, Any)) + + :param callable command: Function to retrieve the next page of items. + :param str prefix: Filters the results to return only directories whose names + begin with the specified prefix. + :param int results_per_page: The maximum number of directory and file names to retrieve per + call. + :param str continuation_token: An opaque continuation token. + """ + def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None): + super(DirectoryPropertiesPaged, self).__init__( + get_next=self._get_next_cb, + extract_data=self._extract_data_cb, + continuation_token=continuation_token or "" + ) + self._command = command + self.service_endpoint = None + self.prefix = prefix + self.marker = None + self.results_per_page = results_per_page + self.location_mode = None + self.current_page = [] + + async def _get_next_cb(self, continuation_token): + try: + return await self._command( + marker=continuation_token or None, + prefix=self.prefix, + maxresults=self.results_per_page, + cls=return_context_and_deserialized, + use_location=self.location_mode) + except HttpResponseError as error: + process_storage_error(error) + + async def _extract_data_cb(self, get_next_return): + self.location_mode, self._response = get_next_return + self.service_endpoint = self._response.service_endpoint + self.prefix = self._response.prefix + self.marker = self._response.marker + self.results_per_page = self._response.max_results + self.current_page = [DirectoryProperties._from_generated(i) for i in self._response.segment.directory_items] # pylint: disable = protected-access + self.current_page.extend([FileProperties._from_generated(i) for i in self._response.segment.file_items]) # pylint: disable = protected-access + return self._response.next_marker or None, self.current_page diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_client_async.py new file mode 100644 index 00000000000..481987d5d08 --- /dev/null +++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_client_async.py @@ -0,0 +1,850 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
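The three paged classes above are not iterated directly; each is passed to azure.core's AsyncItemPaged as its page_iterator_class, and callers consume the results with a plain async for loop. A minimal sketch of driving HandlesPaged through ShareFileClient.list_handles follows; the vendored import path and the SAS placeholder are illustrative assumptions, not part of this diff:

import asyncio

# Assumption: the vendored aio package re-exports ShareFileClient, mirroring
# the public azure.storage.fileshare.aio layout.
from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareFileClient


async def dump_open_handles() -> None:
    file_client = ShareFileClient(
        account_url="https://myaccount.file.core.windows.net",  # hypothetical account
        share_name="myshare",
        file_path="mydir/myfile",
        credential="<sas-token>",  # placeholder credential
    )
    async with file_client:
        # list_handles returns an AsyncItemPaged backed by HandlesPaged;
        # paging happens transparently as the loop advances.
        async for handle in file_client.list_handles(results_per_page=50):
            print(handle.id)


asyncio.run(dump_open_handles())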
+# -------------------------------------------------------------------------- +# pylint: disable=invalid-overridden-method +import warnings +import sys +from typing import ( # pylint: disable=unused-import + Optional, Union, Dict, Any, Iterable, TYPE_CHECKING +) + +from azure.core.exceptions import HttpResponseError +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.pipeline import AsyncPipeline +from .._shared.policies_async import ExponentialRetry +from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper +from .._shared.request_handlers import add_metadata_headers, serialize_iso +from .._shared.response_handlers import ( + return_response_headers, + process_storage_error, + return_headers_and_deserialized) +from .._generated.aio import AzureFileStorage +from .._generated.models import ( + SignedIdentifier, + DeleteSnapshotsOptionType) +from .._deserialize import deserialize_share_properties, deserialize_permission +from .._serialize import get_api_version, get_access_conditions +from .._share_client import ShareClient as ShareClientBase +from ._directory_client_async import ShareDirectoryClient +from ._file_client_async import ShareFileClient +from ..aio._lease_async import ShareLeaseClient +from .._models import ShareProtocols + +if sys.version_info >= (3, 8): + from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports +else: + from typing_extensions import Literal # pylint: disable=ungrouped-imports + +if TYPE_CHECKING: + from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential + from .._models import ShareProperties, AccessPolicy + + +class ShareClient(AsyncStorageAccountHostsMixin, ShareClientBase): + """A client to interact with a specific share, although that share may not yet exist. + + For operations relating to a specific directory or file in this share, the clients for + those entities can also be retrieved using the :func:`get_directory_client` and :func:`get_file_client` functions. + + :param str account_url: + The URI to the storage account. In order to create a client given the full URI to the share, + use the :func:`from_share_url` classmethod. + :param share_name: + The name of the share with which to interact. + :type share_name: str + :param str snapshot: + An optional share snapshot on which to operate. This can be the snapshot ID string + or the response returned from :func:`create_snapshot`. + :param credential: + The credentials with which to authenticate. This is optional if the + account URL already has a SAS token. The value can be a SAS token string, + an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials, + an account shared access key, or an instance of a TokenCredential class from azure.identity. + If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential + - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError. + If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key" + should be the storage account key. + :keyword token_intent: + Required when using `TokenCredential` for authentication and ignored for other forms of authentication. + Specifies the intent for all requests when using `TokenCredential` authentication. 
Possible values are: + + backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory + ACLs are bypassed and full permissions are granted. User must also have required RBAC permission. + + :paramtype token_intent: Literal['backup'] + :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI. + :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI. + :keyword str api_version: + The Storage API version to use for requests. Default value is the most recent service version that is + compatible with the current SDK. Setting to an older version may result in reduced feature compatibility. + + .. versionadded:: 12.1.0 + + :keyword str secondary_hostname: + The hostname of the secondary endpoint. + :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024. + """ + def __init__( + self, account_url: str, + share_name: str, + snapshot: Optional[Union[str, Dict[str, Any]]] = None, + credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None, # pylint: disable=line-too-long + *, + token_intent: Optional[Literal['backup']] = None, + **kwargs: Any + ) -> None: + kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs) + loop = kwargs.pop('loop', None) + if loop and sys.version_info >= (3, 8): + warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level " + "APIs in Python 3.8 and is no longer supported.", DeprecationWarning) + super(ShareClient, self).__init__( + account_url, + share_name=share_name, + snapshot=snapshot, + credential=credential, + **kwargs) + self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None) + self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None) + self.file_request_intent = token_intent + self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline, + allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, + file_request_intent=self.file_request_intent) + self._client._config.version = get_api_version(kwargs) # pylint: disable=protected-access + + def get_directory_client(self, directory_path=None): + # type: (Optional[str]) -> ShareDirectoryClient + """Get a client to interact with the specified directory. + The directory need not already exist. + + :param str directory_path: + Path to the specified directory. + :returns: A Directory Client. + :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareDirectoryClient( + self.url, share_name=self.share_name, directory_path=directory_path or "", snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent) + + def get_file_client(self, file_path): + # type: (str) -> ShareFileClient + """Get a client to interact with the specified file. + The file need not already exist. + + :param str file_path: + Path to the specified file. 
+ :returns: A File Client. + :rtype: ~azure.storage.fileshare.aio.ShareFileClient + """ + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + + return ShareFileClient( + self.url, share_name=self.share_name, file_path=file_path, snapshot=self.snapshot, + credential=self.credential, api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent) + + @distributed_trace_async() + async def acquire_lease(self, **kwargs): + # type: (**Any) -> ShareLeaseClient + """Requests a new lease. + + If the share does not have an active lease, the Share + Service creates a lease on the share and returns a new lease. + + .. versionadded:: 12.5.0 + + :keyword int lease_duration: + Specifies the duration of the lease, in seconds, or negative one + (-1) for a lease that never expires. A non-infinite lease can be + between 15 and 60 seconds. A lease duration cannot be changed + using renew or change. Default is -1 (infinite lease). + :keyword str lease_id: + Proposed lease ID, in a GUID string format. The Share Service + returns 400 (Invalid request) if the proposed lease ID is not + in the correct format. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: A ShareLeaseClient object. + :rtype: ~azure.storage.fileshare.ShareLeaseClient + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share.py + :start-after: [START acquire_lease_on_share] + :end-before: [END acquire_lease_on_share] + :language: python + :dedent: 8 + :caption: Acquiring a lease on a share. + """ + kwargs['lease_duration'] = kwargs.pop('lease_duration', -1) + lease_id = kwargs.pop('lease_id', None) + lease = ShareLeaseClient(self, lease_id=lease_id) # type: ignore + await lease.acquire(**kwargs) + return lease + + @distributed_trace_async + async def create_share(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Creates a new Share under the account. If a share with the + same name already exists, the operation fails. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int quota: + The quota to be allotted. + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + + .. versionadded:: 12.4.0 + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword protocols: + Protocols to enable on the share. Only one protocol can be enabled on the share. + :paramtype protocols: str or ~azure.storage.fileshare.ShareProtocols + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. 
Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash'. + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :returns: Share-updated property dict (Etag and last modified). + :rtype: Dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START create_share] + :end-before: [END create_share] + :language: python + :dedent: 12 + :caption: Creates a file share. + """ + metadata = kwargs.pop('metadata', None) + quota = kwargs.pop('quota', None) + access_tier = kwargs.pop('access_tier', None) + timeout = kwargs.pop('timeout', None) + root_squash = kwargs.pop('root_squash', None) + protocols = kwargs.pop('protocols', None) + if protocols and protocols not in ['NFS', 'SMB', ShareProtocols.SMB, ShareProtocols.NFS]: + raise ValueError("The enabled protocol must be set to either SMB or NFS.") + if root_squash and protocols not in ['NFS', ShareProtocols.NFS]: + raise ValueError("The 'root_squash' keyword can only be used on NFS enabled shares.") + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + + try: + return await self._client.share.create( # type: ignore + timeout=timeout, + metadata=metadata, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + enabled_protocols=protocols, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def create_snapshot( # type: ignore + self, + **kwargs # type: Optional[Any] + ): + # type: (...) -> Dict[str, Any] + """Creates a snapshot of the share. + + A snapshot is a read-only version of a share that's taken at a point in time. + It can be read, copied, or deleted, but not modified. Snapshots provide a way + to back up a share as it appears at a moment in time. + + A snapshot of a share has the same name as the base share from which the snapshot + is taken, with a DateTime value appended to indicate the time at which the + snapshot was taken. + + :keyword dict(str,str) metadata: + Name-value pairs associated with the share as metadata. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: Share-updated property dict (Snapshot ID, Etag, and last modified). + :rtype: dict[str, Any] + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START create_share_snapshot] + :end-before: [END create_share_snapshot] + :language: python + :dedent: 16 + :caption: Creates a snapshot of the file share. + """ + metadata = kwargs.pop('metadata', None) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) # type: ignore + try: + return await self._client.share.create_snapshot( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def delete_share( + self, delete_snapshots=False, # type: Optional[bool] + **kwargs + ): + # type: (...) -> None + """Marks the specified share for deletion. The share is + later deleted during garbage collection. 
+ + :param bool delete_snapshots: + Indicates if snapshots are to be deleted. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START delete_share] + :end-before: [END delete_share] + :language: python + :dedent: 16 + :caption: Deletes the share and any snapshots. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + delete_include = None + if delete_snapshots: + delete_include = DeleteSnapshotsOptionType.include + try: + await self._client.share.delete( + timeout=timeout, + sharesnapshot=self.snapshot, + delete_snapshots=delete_include, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_properties(self, **kwargs): + # type: (Any) -> ShareProperties + """Returns all user-defined metadata and system properties for the + specified share. The data returned does not include the share's + list of files or directories. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: The share properties. + :rtype: ~azure.storage.fileshare.ShareProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_hello_world_async.py + :start-after: [START get_share_properties] + :end-before: [END get_share_properties] + :language: python + :dedent: 16 + :caption: Gets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + props = await self._client.share.get_properties( + timeout=timeout, + sharesnapshot=self.snapshot, + cls=deserialize_share_properties, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + props.name = self.share_name + props.snapshot = self.snapshot + return props # type: ignore + + @distributed_trace_async + async def set_share_quota(self, quota, **kwargs): + # type: (int, Any) -> Dict[str, Any] + """Sets the quota for the share. + + :param int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. 
+ This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_quota] + :end-before: [END set_share_quota] + :language: python + :dedent: 16 + :caption: Sets the share quota. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + return await self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=None, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_share_properties(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Sets the share properties. + + .. versionadded:: 12.3.0 + + :keyword access_tier: + Specifies the access tier of the share. + Possible values: 'TransactionOptimized', 'Hot', and 'Cool' + :paramtype access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier + :keyword int quota: + Specifies the maximum size of the share, in gigabytes. + Must be greater than 0, and less than or equal to 5TB. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword root_squash: + Root squash to set on the share. + Only valid for NFS shares. Possible values include: 'NoRootSquash', 'RootSquash', 'AllSquash' + :paramtype root_squash: str or ~azure.storage.fileshare.ShareRootSquash + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_properties] + :end-before: [END set_share_properties] + :language: python + :dedent: 16 + :caption: Sets the share properties. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + access_tier = kwargs.pop('access_tier', None) + quota = kwargs.pop('quota', None) + root_squash = kwargs.pop('root_squash', None) + if all(parameter is None for parameter in [access_tier, quota, root_squash]): + raise ValueError("set_share_properties should be called with at least one parameter.") + try: + return await self._client.share.set_properties( # type: ignore + timeout=timeout, + quota=quota, + access_tier=access_tier, + root_squash=root_squash, + lease_access_conditions=access_conditions, + cls=return_response_headers, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_share_metadata(self, metadata, **kwargs): + # type: (Dict[str, Any], Any) -> Dict[str, Any] + """Sets the metadata for the share. 
+ + Each call to this operation replaces all existing metadata + attached to the share. To remove all metadata from the share, + call this operation with an empty metadata dict. + + :param metadata: + Name-value pairs associated with the share as metadata. + :type metadata: dict(str, str) + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START set_share_metadata] + :end-before: [END set_share_metadata] + :language: python + :dedent: 16 + :caption: Sets the share metadata. + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + headers = kwargs.pop('headers', {}) + headers.update(add_metadata_headers(metadata)) + try: + return await self._client.share.set_metadata( # type: ignore + timeout=timeout, + cls=return_response_headers, + headers=headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_access_policy(self, **kwargs): + # type: (Any) -> Dict[str, Any] + """Gets the permissions for the share. The permissions + indicate whether files in a share may be accessed publicly. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Access policy information in a dict. + :rtype: dict[str, Any] + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + response, identifiers = await self._client.share.get_access_policy( + timeout=timeout, + cls=return_headers_and_deserialized, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + return { + 'public_access': response.get('share_public_access'), + 'signed_identifiers': identifiers or [] + } + + @distributed_trace_async + async def set_share_access_policy(self, signed_identifiers, **kwargs): + # type: (Dict[str, AccessPolicy], Any) -> Dict[str, str] + """Sets the permissions for the share, or stored access + policies that may be used with Shared Access Signatures. The permissions + indicate whether files in a share may be accessed publicly. + + :param signed_identifiers: + A dictionary of access policies to associate with the share. The + dictionary may contain up to 5 elements. 
An empty dictionary + will clear the access policies set on the service. + :type signed_identifiers: dict(str, :class:`~azure.storage.fileshare.AccessPolicy`) + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :returns: Share-updated property dict (Etag and last modified). + :rtype: dict(str, Any) + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + if len(signed_identifiers) > 5: + raise ValueError( + 'Too many access policies provided. The server does not support setting ' + 'more than 5 access policies on a single resource.') + identifiers = [] + for key, value in signed_identifiers.items(): + if value: + value.start = serialize_iso(value.start) + value.expiry = serialize_iso(value.expiry) + identifiers.append(SignedIdentifier(id=key, access_policy=value)) + signed_identifiers = identifiers # type: ignore + + try: + return await self._client.share.set_access_policy( # type: ignore + share_acl=signed_identifiers or None, + timeout=timeout, + cls=return_response_headers, + lease_access_conditions=access_conditions, + **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_share_stats(self, **kwargs): + # type: (Any) -> int + """Gets the approximate size of the data stored on the share in bytes. + + Note that this value may not include all recently created + or recently re-sized files. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :keyword lease: + Required if the share has an active lease. Value can be a ShareLeaseClient object + or the lease ID as a string. + + .. versionadded:: 12.5.0 + This keyword argument was introduced in API version '2020-08-04'. + + :return: The approximate size of the data (in bytes) stored on the share. + :rtype: int + """ + access_conditions = get_access_conditions(kwargs.pop('lease', None)) + timeout = kwargs.pop('timeout', None) + try: + stats = await self._client.share.get_statistics( + timeout=timeout, + lease_access_conditions=access_conditions, + **kwargs) + return stats.share_usage_bytes # type: ignore + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_directories_and_files( # type: ignore + self, directory_name=None, # type: Optional[str] + name_starts_with=None, # type: Optional[str] + marker=None, # type: Optional[str] + **kwargs # type: Any + ): + # type: (...) -> Iterable[Dict[str,str]] + """Lists the directories and files under the share. + + :param str directory_name: + Name of a directory. + :param str name_starts_with: + Filters the results to return only directories whose names + begin with the specified prefix. + :param str marker: + An opaque continuation token. 
This value can be retrieved from the + next_marker field of a previous generator object. If specified, + this generator will begin returning results from this point. + :keyword list[str] include: + Include this parameter to specify one or more datasets to include in the response. + Possible str values are "timestamps", "Etag", "Attributes", "PermissionKey". + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword bool include_extended_info: + If this is set to true, file id will be returned in listed results. + + .. versionadded:: 12.6.0 + This keyword argument was introduced in API version '2020-10-02'. + + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: An auto-paging iterable of dict-like DirectoryProperties and FileProperties + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_share_async.py + :start-after: [START share_list_files_in_dir] + :end-before: [END share_list_files_in_dir] + :language: python + :dedent: 16 + :caption: List directories and files in the share. + """ + timeout = kwargs.pop('timeout', None) + directory = self.get_directory_client(directory_name) + return directory.list_directories_and_files( + name_starts_with=name_starts_with, marker=marker, timeout=timeout, **kwargs) + + @distributed_trace_async + async def create_permission_for_share(self, file_permission, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Create a permission (a security descriptor) at the share level. + + This 'permission' can be used for the files/directories in the share. + If the 'permission' already exists, its key is returned; otherwise, a + new permission is created at the share level and its key is returned. + + :param str file_permission: + File permission, a Portable SDDL + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. + :returns: A file permission key + :rtype: str + """ + timeout = kwargs.pop('timeout', None) + options = self._create_permission_for_share_options(file_permission, timeout=timeout, **kwargs) + try: + return await self._client.share.create_permission(**options) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def get_permission_for_share( # type: ignore + self, permission_key, # type: str + **kwargs # type: Any + ): + # type: (...) -> str + """Get a permission (a security descriptor) for a given key. + + This 'permission' can be used for the files/directories in the share. + + :param str permission_key: + Key of the file permission to retrieve + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timeouts + see `here `_. 
+    @distributed_trace_async
+    async def get_permission_for_share(  # type: ignore
+            self, permission_key,  # type: str
+            **kwargs  # type: Any
+        ):
+        # type: (...) -> str
+        """Get a permission (a security descriptor) for a given key.
+
+        This 'permission' can be used for the files/directories in the share.
+
+        :param str permission_key:
+            Key of the file permission to retrieve
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: A file permission (a portable SDDL)
+        :rtype: str
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.share.get_permission(  # type: ignore
+                file_permission_key=permission_key,
+                cls=deserialize_permission,
+                timeout=timeout,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_directory(self, directory_name, **kwargs):
+        # type: (str, Any) -> ShareDirectoryClient
+        """Creates a directory in the share and returns a client to interact
+        with the directory.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword dict(str,str) metadata:
+            Name-value pairs associated with the directory as metadata.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: ShareDirectoryClient
+        :rtype: ~azure.storage.fileshare.aio.ShareDirectoryClient
+        """
+        directory = self.get_directory_client(directory_name)
+        kwargs.setdefault('merge_span', True)
+        await directory.create_directory(**kwargs)
+        return directory  # type: ignore
+
+    @distributed_trace_async
+    async def delete_directory(self, directory_name, **kwargs):
+        # type: (str, Any) -> None
+        """Marks the directory for deletion. The directory is
+        later deleted during garbage collection.
+
+        :param str directory_name:
+            The name of the directory.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+        """
+        directory = self.get_directory_client(directory_name)
+        await directory.delete_directory(**kwargs)
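A short sketch tying together the directory helpers that close out this file: create a directory, enumerate it, then delete it. Names and credentials are placeholders; the vendored import path is assumed from the file paths in this diff.

```python
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareClient


async def main():
    share = ShareClient(
        account_url="https://<account>.file.core.windows.net",  # placeholder
        share_name="myshare",                                   # placeholder
        credential="<sas-token>",                               # placeholder
    )
    async with share:
        await share.create_directory("logs", metadata={"owner": "aosm"})
        # list_directories_and_files returns an async paged iterable of
        # dict-like DirectoryProperties/FileProperties.
        async for item in share.list_directories_and_files(name_starts_with="log"):
            print(item["name"])
        await share.delete_directory("logs")


asyncio.run(main())
```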
diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_service_client_async.py b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_service_client_async.py
new file mode 100644
index 00000000000..967f58bd842
--- /dev/null
+++ b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/aio/_share_service_client_async.py
@@ -0,0 +1,419 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+import functools
+import sys
+import warnings
+from typing import (
+    Union, Optional, Any, Dict, List,
+    TYPE_CHECKING
+)
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator_async import distributed_trace_async
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import process_storage_error
+from .._shared.policies_async import ExponentialRetry
+from .._generated.aio import AzureFileStorage
+from .._generated.models import StorageServiceProperties
+from .._share_service_client import ShareServiceClient as ShareServiceClientBase
+from .._serialize import get_api_version
+from ._share_client_async import ShareClient
+from ._models import SharePropertiesPaged
+from .._models import service_properties_deserialize
+
+if sys.version_info >= (3, 8):
+    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
+else:
+    from typing_extensions import Literal  # pylint: disable=ungrouped-imports
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential, TokenCredential
+    from .._models import (
+        ShareProperties,
+        Metrics,
+        CorsRule,
+        ShareProtocolSettings,
+    )
+
+
+class ShareServiceClient(AsyncStorageAccountHostsMixin, ShareServiceClientBase):
+    """A client to interact with the File Share Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete shares within the account.
+    For operations relating to a specific share, a client for that entity
+    can also be retrieved using the :func:`get_share_client` function.
+
+    :param str account_url:
+        The URL to the file share storage account. Any other entities included
+        in the URL path (e.g. share or file) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword token_intent:
+        Required when using `TokenCredential` for authentication and ignored for other forms of authentication.
+        Specifies the intent for all requests when using `TokenCredential` authentication. Possible values are:
+
+        backup - Specifies requests are intended for backup/admin type operations, meaning that all file/directory
+        ACLs are bypassed and full permissions are granted. User must also have required RBAC permission.
+
+    :paramtype token_intent: Literal['backup']
+    :keyword bool allow_trailing_dot: If true, the trailing dot will not be trimmed from the target URI.
+    :keyword bool allow_source_trailing_dot: If true, the trailing dot will not be trimmed from the source URI.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.1.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_range_size: The maximum range size used for a file upload. Defaults to 4*1024*1024.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/file_samples_authentication_async.py
+            :start-after: [START create_share_service_client]
+            :end-before: [END create_share_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Create the share service client with url and credential.
+    """
+    def __init__(
+            self, account_url: str,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "TokenCredential"]] = None,  # pylint: disable=line-too-long
+            *,
+            token_intent: Optional[Literal['backup']] = None,
+            **kwargs: Any
+        ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        loop = kwargs.pop('loop', None)
+        if loop and sys.version_info >= (3, 8):
+            warnings.warn("The 'loop' parameter was deprecated from asyncio's high-level "
+                          "APIs in Python 3.8 and is no longer supported.", DeprecationWarning)
+        super(ShareServiceClient, self).__init__(
+            account_url,
+            credential=credential,
+            **kwargs)
+        self.allow_trailing_dot = kwargs.pop('allow_trailing_dot', None)
+        self.allow_source_trailing_dot = kwargs.pop('allow_source_trailing_dot', None)
+        self.file_request_intent = token_intent
+        self._client = AzureFileStorage(url=self.url, base_url=self.url, pipeline=self._pipeline,
+                                        allow_trailing_dot=self.allow_trailing_dot,
+                                        allow_source_trailing_dot=self.allow_source_trailing_dot,
+                                        file_request_intent=self.file_request_intent)
+        self._client._config.version = get_api_version(kwargs)  # pylint: disable=protected-access
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs):
+        # type: (Any) -> Dict[str, Any]
+        """Gets the properties of a storage account's File Share service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: A dictionary containing file service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_service_properties]
+                :end-before: [END get_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Get file share service properties.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
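A minimal sketch of constructing the service client above and round-tripping service properties with the getter here and the setter that follows. It assumes `Metrics` and `RetentionPolicy` are re-exported from the vendored package root, as in the public azure-storage-file-share distribution; account URL and key are placeholders.

```python
import asyncio

# Assumed re-exports from the vendored package root.
from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02 import (
    Metrics,
    RetentionPolicy,
)
from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareServiceClient


async def main():
    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",  # placeholder
        credential="<account-key>",                              # placeholder
    )
    async with service:
        props = await service.get_service_properties()
        print(props["hour_metrics"])
        # Enable hourly metrics with a 5-day retention window; other settings
        # passed as None are preserved server-side.
        hourly = Metrics(enabled=True, include_apis=True,
                         retention_policy=RetentionPolicy(enabled=True, days=5))
        await service.set_service_properties(hour_metrics=hourly)


asyncio.run(main())
```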
+ """ + timeout = kwargs.pop('timeout', None) + try: + service_props = await self._client.service.get_properties(timeout=timeout, **kwargs) + return service_properties_deserialize(service_props) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace_async + async def set_service_properties( + self, hour_metrics=None, # type: Optional[Metrics] + minute_metrics=None, # type: Optional[Metrics] + cors=None, # type: Optional[List[CorsRule]] + protocol=None, # type: Optional[ShareProtocolSettings] + **kwargs + ): + # type: (...) -> None + """Sets the properties of a storage account's File Share service, including + Azure Storage Analytics. If an element (e.g. hour_metrics) is left as None, the + existing settings on the service for that functionality are preserved. + + :param hour_metrics: + The hour metrics settings provide a summary of request + statistics grouped by API in hourly aggregates for files. + :type hour_metrics: ~azure.storage.fileshare.Metrics + :param minute_metrics: + The minute metrics settings provide request statistics + for each minute for files. + :type minute_metrics: ~azure.storage.fileshare.Metrics + :param cors: + You can include up to five CorsRule elements in the + list. If an empty list is specified, all CORS rules will be deleted, + and CORS will be disabled for the service. + :type cors: list(:class:`~azure.storage.fileshare.CorsRule`) + :param protocol_settings: + Sets protocol settings + :type protocol: ~azure.storage.fileshare.ShareProtocolSettings + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. For more details see + https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations. + This value is not tracked or validated on the client. To configure client-side network timesouts + see `here `_. + :rtype: None + + .. admonition:: Example: + + .. literalinclude:: ../samples/file_samples_service_async.py + :start-after: [START set_service_properties] + :end-before: [END set_service_properties] + :language: python + :dedent: 8 + :caption: Sets file share service properties. + """ + timeout = kwargs.pop('timeout', None) + props = StorageServiceProperties( + hour_metrics=hour_metrics, + minute_metrics=minute_metrics, + cors=cors, + protocol=protocol + ) + try: + await self._client.service.set_properties(props, timeout=timeout, **kwargs) + except HttpResponseError as error: + process_storage_error(error) + + @distributed_trace + def list_shares( + self, name_starts_with=None, # type: Optional[str] + include_metadata=False, # type: Optional[bool] + include_snapshots=False, # type: Optional[bool] + **kwargs # type: Any + ): # type: (...) -> AsyncItemPaged + """Returns auto-paging iterable of dict-like ShareProperties under the specified account. + The generator will lazily follow the continuation tokens returned by + the service and stop when all shares have been returned. + + :param str name_starts_with: + Filters the results to return only shares whose names + begin with the specified name_starts_with. + :param bool include_metadata: + Specifies that share metadata be returned in the response. + :param bool include_snapshots: + Specifies that share snapshot be returned in the response. + :keyword bool include_deleted: + Specifies that deleted shares be returned in the response. + This is only for share soft delete enabled account. + :keyword int timeout: + Sets the server-side timeout for the operation in seconds. 
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :returns: An iterable (auto-paging) of ShareProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.fileshare.ShareProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_list_shares]
+                :end-before: [END fsc_list_shares]
+                :language: python
+                :dedent: 16
+                :caption: List shares in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        include = []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        if include_metadata:
+            include.append('metadata')
+        if include_snapshots:
+            include.append('snapshots')
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_shares_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, prefix=name_starts_with, results_per_page=results_per_page,
+            page_iterator_class=SharePropertiesPaged)
+
+    @distributed_trace_async
+    async def create_share(
+            self, share_name,  # type: str
+            **kwargs
+        ):
+        # type: (...) -> ShareClient
+        """Creates a new share under the specified account. If the share
+        with the same name already exists, the operation fails. Returns a client with
+        which to interact with the newly created share.
+
+        :param str share_name: The name of the share to create.
+        :keyword dict(str,str) metadata:
+            A dict with name-value pairs to associate with the
+            share as metadata. Example:{'Category':'test'}
+        :keyword int quota:
+            Quota in bytes.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_create_shares]
+                :end-before: [END fsc_create_shares]
+                :language: python
+                :dedent: 12
+                :caption: Create a share in the file share service.
+        """
+        metadata = kwargs.pop('metadata', None)
+        quota = kwargs.pop('quota', None)
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.create_share(metadata=metadata, quota=quota, timeout=timeout, **kwargs)
+        return share
+
+    @distributed_trace_async
+    async def delete_share(
+            self, share_name,  # type: Union[ShareProperties, str]
+            delete_snapshots=False,  # type: Optional[bool]
+            **kwargs
+        ):
+        # type: (...) -> None
+        """Marks the specified share for deletion. The share is
+        later deleted during garbage collection.
+
+        :param share_name:
+            The share to delete. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share_name: str or ~azure.storage.fileshare.ShareProperties
+        :param bool delete_snapshots:
+            Indicates if snapshots are to be deleted.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START fsc_delete_shares]
+                :end-before: [END fsc_delete_shares]
+                :language: python
+                :dedent: 16
+                :caption: Delete a share in the file share service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        share = self.get_share_client(share_name)
+        kwargs.setdefault('merge_span', True)
+        await share.delete_share(
+            delete_snapshots=delete_snapshots, timeout=timeout, **kwargs)
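A sketch of the account-level share lifecycle covered by the three methods above: create, list with metadata, then delete. Endpoint and credential are placeholders, and the vendored import path is assumed from this diff.

```python
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareServiceClient


async def main():
    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",  # placeholder
        credential="<account-key>",                              # placeholder
    )
    async with service:
        await service.create_share("scratch", metadata={"purpose": "demo"})
        # ShareProperties is dict-like, so items can be indexed by key.
        async for share in service.list_shares(name_starts_with="scr",
                                               include_metadata=True):
            print(share["name"], share["metadata"])
        await service.delete_share("scratch")


asyncio.run(main())
```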
+    @distributed_trace_async
+    async def undelete_share(self, deleted_share_name, deleted_share_version, **kwargs):
+        # type: (str, str, **Any) -> ShareClient
+        """Restores a soft-deleted share.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_share_name:
+            Specifies the name of the deleted share to restore.
+        :param str deleted_share_version:
+            Specifies the version of the deleted share to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-file-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here `_.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+        """
+        share = self.get_share_client(deleted_share_name)
+        try:
+            await share._client.share.restore(deleted_share_name=deleted_share_name,  # pylint: disable = protected-access
+                                              deleted_share_version=deleted_share_version,
+                                              timeout=kwargs.pop('timeout', None), **kwargs)
+            return share
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def get_share_client(self, share, snapshot=None):
+        # type: (Union[ShareProperties, str], Optional[Union[Dict[str, Any], str]]) -> ShareClient
+        """Get a client to interact with the specified share.
+        The share need not already exist.
+
+        :param share:
+            The share. This can either be the name of the share,
+            or an instance of ShareProperties.
+        :type share: str or ~azure.storage.fileshare.ShareProperties
+        :param str snapshot:
+            An optional share snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :returns: A ShareClient.
+        :rtype: ~azure.storage.fileshare.aio.ShareClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/file_samples_service_async.py
+                :start-after: [START get_share_client]
+                :end-before: [END get_share_client]
+                :language: python
+                :dedent: 8
+                :caption: Gets the share client.
+        """
+        try:
+            share_name = share.name
+        except AttributeError:
+            share_name = share
+
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport),  # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # pylint: disable = protected-access
+        )
+        return ShareClient(
+            self.url, share_name=share_name, snapshot=snapshot, credential=self.credential,
+            api_version=self.api_version, _hosts=self._hosts, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot,
+            allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent)
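An illustrative restore flow for `undelete_share`, which needs both the name and the version of the deleted share. The sketch assumes, as in the public SDK, that `list_shares(include_deleted=True)` yields ShareProperties carrying `deleted` and `version` fields on soft-delete-enabled accounts; endpoint and credential are placeholders.

```python
import asyncio

from azext_aosm.vendored_sdks.azure_storagev2.fileshare.v2022_11_02.aio import ShareServiceClient


async def main():
    service = ShareServiceClient(
        account_url="https://<account>.file.core.windows.net",  # placeholder
        credential="<account-key>",                              # placeholder
    )
    async with service:
        async for share in service.list_shares(include_deleted=True):
            if share["name"] == "scratch" and share["deleted"]:
                # Restore returns a ShareClient for the recovered share.
                restored = await service.undelete_share(share["name"], share["version"])
                print(await restored.get_share_properties())


asyncio.run(main())
```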
+ """ + try: + share_name = share.name + except AttributeError: + share_name = share + + _pipeline = AsyncPipeline( + transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access + policies=self._pipeline._impl_policies # pylint: disable = protected-access + ) + return ShareClient( + self.url, share_name=share_name, snapshot=snapshot, credential=self.credential, + api_version=self.api_version, _hosts=self._hosts, _configuration=self._config, + _pipeline=_pipeline, _location_mode=self._location_mode, allow_trailing_dot=self.allow_trailing_dot, + allow_source_trailing_dot=self.allow_source_trailing_dot, token_intent=self.file_request_intent) diff --git a/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/py.typed b/src/aosm/azext_aosm/vendored_sdks/azure_storagev2/fileshare/v2022_11_02/py.typed new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/aosm/setup.py b/src/aosm/setup.py index 3e72089cde9..0ab04eea759 100644 --- a/src/aosm/setup.py +++ b/src/aosm/setup.py @@ -16,7 +16,7 @@ # Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = "1.0.0b2" +VERSION = "1.0.0b3" # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers @@ -31,7 +31,7 @@ "License :: OSI Approved :: MIT License", ] -DEPENDENCIES = ["oras~=0.1.19", "azure-storage-blob>=12.15.0", "jinja2>=3.1.2"] +DEPENDENCIES = ["oras~=0.1.19", "jinja2>=3.1.2"] with open("README.md", "r", encoding="utf-8") as f: README = f.read()