diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/__init__.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/__init__.py
index da4d07e1222e..bfdd6ac1c466 100644
--- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/__init__.py
+++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/__init__.py
@@ -9,10 +9,11 @@
 # regenerated.
 # --------------------------------------------------------------------------
 
-from .batch_management_client import BatchManagementClient
-from .version import VERSION
+from ._configuration import BatchManagementClientConfiguration
+from ._batch_management_client import BatchManagementClient
+__all__ = ['BatchManagementClient', 'BatchManagementClientConfiguration']
 
-__all__ = ['BatchManagementClient']
+from .version import VERSION
 
 __version__ = VERSION
 
diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/batch_management_client.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_batch_management_client.py
similarity index 61%
rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/batch_management_client.py
rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_batch_management_client.py
index d31cfa110ab2..6cdcc5ca3eaf 100644
--- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/batch_management_client.py
+++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_batch_management_client.py
@@ -9,54 +9,21 @@
 # regenerated.
 # --------------------------------------------------------------------------
 
-from msrest.service_client import ServiceClient
+from msrest.service_client import SDKClient
 from msrest import Serializer, Deserializer
-from msrestazure import AzureConfiguration
-from .version import VERSION
-from .operations.batch_account_operations import BatchAccountOperations
-from .operations.application_package_operations import ApplicationPackageOperations
-from .operations.application_operations import ApplicationOperations
-from .operations.location_operations import LocationOperations
-from .operations.operations import Operations
-from .operations.certificate_operations import CertificateOperations
-from .operations.pool_operations import PoolOperations
-from . import models
-
-
-class BatchManagementClientConfiguration(AzureConfiguration):
-    """Configuration for BatchManagementClient
-    Note that all parameters used to create this instance are saved as instance
-    attributes.
-    :param credentials: Credentials needed for the client to connect to Azure.
-    :type credentials: :mod:`A msrestazure Credentials
-     object`
-    :param subscription_id: The Azure subscription ID. This is a
-     GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)
-    :type subscription_id: str
-    :param str base_url: Service URL
-    """
-
-    def __init__(
-            self, credentials, subscription_id, base_url=None):
-
-        if credentials is None:
-            raise ValueError("Parameter 'credentials' must not be None.")
-        if subscription_id is None:
-            raise ValueError("Parameter 'subscription_id' must not be None.")
-        if not base_url:
-            base_url = 'https://management.azure.com'
-
-        super(BatchManagementClientConfiguration, self).__init__(base_url)
-
-        self.add_user_agent('azure-mgmt-batch/{}'.format(VERSION))
-        self.add_user_agent('Azure-SDK-For-Python')
-
-        self.credentials = credentials
-        self.subscription_id = subscription_id
+from ._configuration import BatchManagementClientConfiguration
+from .operations import BatchAccountOperations
+from .operations import ApplicationPackageOperations
+from .operations import ApplicationOperations
+from .operations import LocationOperations
+from .operations import Operations
+from .operations import CertificateOperations
+from .operations import PoolOperations
+from . import models
 
 
-class BatchManagementClient(object):
+class BatchManagementClient(SDKClient):
     """BatchManagementClient
 
     :ivar config: Configuration for client.
@@ -90,10 +57,10 @@ def __init__(
             self, credentials, subscription_id, base_url=None):
 
         self.config = BatchManagementClientConfiguration(credentials, subscription_id, base_url)
-        self._client = ServiceClient(self.config.credentials, self.config)
+        super(BatchManagementClient, self).__init__(self.config.credentials, self.config)
 
         client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-12-01'
+        self.api_version = '2019-04-01'
         self._serialize = Serializer(client_models)
         self._deserialize = Deserializer(client_models)
 
diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_configuration.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_configuration.py
new file mode 100644
index 000000000000..75c9d4004f40
--- /dev/null
+++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/_configuration.py
@@ -0,0 +1,49 @@
+# coding=utf-8
+# --------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+#
+# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is
+# regenerated.
+# --------------------------------------------------------------------------
+from msrestazure import AzureConfiguration
+
+from .version import VERSION
+
+
+class BatchManagementClientConfiguration(AzureConfiguration):
+    """Configuration for BatchManagementClient
+    Note that all parameters used to create this instance are saved as instance
+    attributes.
+
+    :param credentials: Credentials needed for the client to connect to Azure.
+    :type credentials: :mod:`A msrestazure Credentials
+     object`
+    :param subscription_id: The Azure subscription ID. This is a
+     GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000)
+    :type subscription_id: str
+    :param str base_url: Service URL
+    """
+
+    def __init__(
+            self, credentials, subscription_id, base_url=None):
+
+        if credentials is None:
+            raise ValueError("Parameter 'credentials' must not be None.")
+        if subscription_id is None:
+            raise ValueError("Parameter 'subscription_id' must not be None.")
+        if not base_url:
+            base_url = 'https://management.azure.com'
+
+        super(BatchManagementClientConfiguration, self).__init__(base_url)
+
+        # Starting Autorest.Python 4.0.64, make connection pool activated by default
+        self.keep_alive = True
+
+        self.add_user_agent('azure-mgmt-batch/{}'.format(VERSION))
+        self.add_user_agent('Azure-SDK-For-Python')
+
+        self.credentials = credentials
+        self.subscription_id = subscription_id
diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/__init__.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/__init__.py
index ad7c3fe653cf..b17bdd9834c9 100644
--- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/__init__.py
+++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/__init__.py
@@ -9,21 +9,26 @@
 # regenerated.
 # --------------------------------------------------------------------------
 
-
 try:
     from ._models_py3 import ActivateApplicationPackageParameters
+    from ._models_py3 import Application
+    from ._models_py3 import ApplicationPackage
     from ._models_py3 import ApplicationPackageReference
     from ._models_py3 import AutoScaleRun
     from ._models_py3 import AutoScaleRunError
     from ._models_py3 import AutoScaleSettings
     from ._models_py3 import AutoStorageBaseProperties
+    from ._models_py3 import AutoStorageProperties
    from ._models_py3 import AutoUserSpecification
+    from ._models_py3 import BatchAccount
     from ._models_py3 import BatchAccountCreateParameters
     from ._models_py3 import BatchAccountKeys
     from ._models_py3 import BatchAccountRegenerateKeyParameters
     from ._models_py3 import BatchAccountUpdateParameters
     from ._models_py3 import BatchLocationQuota
+    from ._models_py3 import Certificate
     from ._models_py3 import CertificateBaseProperties
+    from ._models_py3 import CertificateCreateOrUpdateParameters
     from ._models_py3 import CertificateReference
     from ._models_py3 import CheckNameAvailabilityParameters
     from ._models_py3 import CheckNameAvailabilityResult
@@ -44,6 +49,7 @@
     from ._models_py3 import NetworkSecurityGroupRule
     from ._models_py3 import Operation
     from ._models_py3 import OperationDisplay
+    from ._models_py3 import Pool
     from ._models_py3 import PoolEndpointConfiguration
     from ._models_py3 import ProxyResource
     from ._models_py3 import ResizeError
@@ -57,29 +63,29 @@
     from ._models_py3 import UserAccount
     from ._models_py3 import UserIdentity
     from ._models_py3 import VirtualMachineConfiguration
+    from ._models_py3 import VirtualMachineFamilyCoreQuota
     from ._models_py3 import WindowsConfiguration
     from ._models_py3 import WindowsUserConfiguration
-    from ._models_py3 import Application
-    from ._models_py3 import ApplicationPackage
-    from ._models_py3 import AutoStorageProperties
-    from ._models_py3 import BatchAccount
-    from ._models_py3 import Certificate
-    from ._models_py3 import CertificateCreateOrUpdateParameters
-    from ._models_py3 import Pool
 except (SyntaxError, ImportError):
     from ._models import ActivateApplicationPackageParameters
+    from ._models import Application
+    from ._models import ApplicationPackage
     from ._models import ApplicationPackageReference
     from ._models import AutoScaleRun
     from ._models import AutoScaleRunError
     from ._models import AutoScaleSettings
     from ._models import AutoStorageBaseProperties
+    from ._models import AutoStorageProperties
     from ._models import AutoUserSpecification
+    from ._models import BatchAccount
     from ._models import BatchAccountCreateParameters
     from ._models import BatchAccountKeys
     from ._models import BatchAccountRegenerateKeyParameters
     from ._models import BatchAccountUpdateParameters
     from ._models import BatchLocationQuota
+    from ._models import Certificate
     from ._models import CertificateBaseProperties
+    from ._models import CertificateCreateOrUpdateParameters
     from ._models import CertificateReference
     from ._models import CheckNameAvailabilityParameters
     from ._models import CheckNameAvailabilityResult
@@ -100,6 +106,7 @@
     from ._models import NetworkSecurityGroupRule
     from ._models import Operation
     from ._models import OperationDisplay
+    from ._models import Pool
     from ._models import PoolEndpointConfiguration
     from ._models import ProxyResource
     from ._models import ResizeError
@@ -113,58 +120,59 @@
     from ._models import UserAccount
     from ._models import UserIdentity
     from ._models import VirtualMachineConfiguration
+    from ._models import VirtualMachineFamilyCoreQuota
     from ._models import WindowsConfiguration
     from ._models import WindowsUserConfiguration
-    from ._models import Application
-    from ._models import ApplicationPackage
-    from ._models import AutoStorageProperties
-    from ._models import BatchAccount
-    from ._models import Certificate
-    from ._models import CertificateCreateOrUpdateParameters
-    from ._models import Pool
-from ._paged_models import BatchAccountPaged
 from ._paged_models import ApplicationPackagePaged
 from ._paged_models import ApplicationPaged
-from ._paged_models import OperationPaged
+from ._paged_models import BatchAccountPaged
 from ._paged_models import CertificatePaged
+from ._paged_models import OperationPaged
 from ._paged_models import PoolPaged
-from ._batch_management_client_enums import PoolAllocationMode
-from ._batch_management_client_enums import ProvisioningState
-from ._batch_management_client_enums import AccountKeyType
-from ._batch_management_client_enums import PackageState
-from ._batch_management_client_enums import CertificateFormat
-from ._batch_management_client_enums import CertificateProvisioningState
-from ._batch_management_client_enums import PoolProvisioningState
-from ._batch_management_client_enums import AllocationState
-from ._batch_management_client_enums import CachingType
-from ._batch_management_client_enums import StorageAccountType
-from ._batch_management_client_enums import ComputeNodeDeallocationOption
-from ._batch_management_client_enums import InterNodeCommunicationState
-from ._batch_management_client_enums import InboundEndpointProtocol
-from ._batch_management_client_enums import NetworkSecurityGroupRuleAccess
-from ._batch_management_client_enums import ComputeNodeFillType
-from ._batch_management_client_enums import ElevationLevel
-from ._batch_management_client_enums import LoginMode
-from ._batch_management_client_enums import AutoUserScope
-from ._batch_management_client_enums import CertificateStoreLocation
-from ._batch_management_client_enums import CertificateVisibility
-from ._batch_management_client_enums import NameAvailabilityReason
+from ._batch_management_client_enums import (
+    PoolAllocationMode,
+    ProvisioningState,
+    AccountKeyType,
+    PackageState,
+    CertificateFormat,
+    CertificateProvisioningState,
+    PoolProvisioningState,
+    AllocationState,
+    CachingType,
+    StorageAccountType,
+    ComputeNodeDeallocationOption,
+    InterNodeCommunicationState,
+    InboundEndpointProtocol,
+    NetworkSecurityGroupRuleAccess,
+    ComputeNodeFillType,
+    ElevationLevel,
+    LoginMode,
+    AutoUserScope,
+    CertificateStoreLocation,
+    CertificateVisibility,
+    NameAvailabilityReason,
+)
 
-
-__all__=[
+__all__ = [
     'ActivateApplicationPackageParameters',
+    'Application',
+    'ApplicationPackage',
     'ApplicationPackageReference',
     'AutoScaleRun',
     'AutoScaleRunError',
     'AutoScaleSettings',
     'AutoStorageBaseProperties',
+    'AutoStorageProperties',
     'AutoUserSpecification',
+    'BatchAccount',
     'BatchAccountCreateParameters',
     'BatchAccountKeys',
     'BatchAccountRegenerateKeyParameters',
     'BatchAccountUpdateParameters',
     'BatchLocationQuota',
+    'Certificate',
     'CertificateBaseProperties',
+    'CertificateCreateOrUpdateParameters',
     'CertificateReference',
     'CheckNameAvailabilityParameters',
     'CheckNameAvailabilityResult',
@@ -185,6 +193,7 @@
     'NetworkSecurityGroupRule',
     'Operation',
     'OperationDisplay',
+    'Pool',
     'PoolEndpointConfiguration',
     'ProxyResource',
     'ResizeError',
@@ -198,15 +207,9 @@
     'UserAccount',
     'UserIdentity',
     'VirtualMachineConfiguration',
+    'VirtualMachineFamilyCoreQuota',
     'WindowsConfiguration',
     'WindowsUserConfiguration',
-    'Application',
-    'ApplicationPackage',
-    'AutoStorageProperties',
-    'BatchAccount',
-    'Certificate',
-    'CertificateCreateOrUpdateParameters',
-    'Pool',
     'BatchAccountPaged',
     'ApplicationPackagePaged',
     'ApplicationPaged',
diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_batch_management_client_enums.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_batch_management_client_enums.py
index da2eba944432..0bd6782557aa 100644
--- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_batch_management_client_enums.py
+++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_batch_management_client_enums.py
@@ -12,137 +12,137 @@
 from enum import Enum
 
 
-class PoolAllocationMode(Enum):
+class PoolAllocationMode(str, Enum):
 
-    batch_service = "BatchService"
-    user_subscription = "UserSubscription"
+    batch_service = "BatchService" #: Pools will be allocated in subscriptions owned by the Batch service.
+    user_subscription = "UserSubscription" #: Pools will be allocated in a subscription owned by the user.
 
 
-class ProvisioningState(Enum):
+class ProvisioningState(str, Enum):
 
-    invalid = "Invalid"
-    creating = "Creating"
-    deleting = "Deleting"
-    succeeded = "Succeeded"
-    failed = "Failed"
-    cancelled = "Cancelled"
+    invalid = "Invalid" #: The account is in an invalid state.
+    creating = "Creating" #: The account is being created.
+    deleting = "Deleting" #: The account is being deleted.
+    succeeded = "Succeeded" #: The account has been created and is ready for use.
+    failed = "Failed" #: The last operation for the account is failed.
+    cancelled = "Cancelled" #: The last operation for the account is cancelled.
 
 
-class AccountKeyType(Enum):
+class AccountKeyType(str, Enum):
 
-    primary = "Primary"
-    secondary = "Secondary"
+    primary = "Primary" #: The primary account key.
+    secondary = "Secondary" #: The secondary account key.
 
 
-class PackageState(Enum):
+class PackageState(str, Enum):
 
-    pending = "Pending"
-    active = "Active"
+    pending = "Pending" #: The application package has been created but has not yet been activated.
+    active = "Active" #: The application package is ready for use.
 
 
-class CertificateFormat(Enum):
+class CertificateFormat(str, Enum):
 
-    pfx = "Pfx"
-    cer = "Cer"
+    pfx = "Pfx" #: The certificate is a PFX (PKCS#12) formatted certificate or certificate chain.
+ cer = "Cer" #: The certificate is a base64-encoded X.509 certificate. -class CertificateProvisioningState(Enum): +class CertificateProvisioningState(str, Enum): - succeeded = "Succeeded" - deleting = "Deleting" - failed = "Failed" + succeeded = "Succeeded" #: The certificate is available for use in pools. + deleting = "Deleting" #: The user has requested that the certificate be deleted, but the delete operation has not yet completed. You may not reference the certificate when creating or updating pools. + failed = "Failed" #: The user requested that the certificate be deleted, but there are pools that still have references to the certificate, or it is still installed on one or more compute nodes. (The latter can occur if the certificate has been removed from the pool, but the node has not yet restarted. Nodes refresh their certificates only when they restart.) You may use the cancel certificate delete operation to cancel the delete, or the delete certificate operation to retry the delete. -class PoolProvisioningState(Enum): +class PoolProvisioningState(str, Enum): - succeeded = "Succeeded" - deleting = "Deleting" + succeeded = "Succeeded" #: The pool is available to run tasks subject to the availability of compute nodes. + deleting = "Deleting" #: The user has requested that the pool be deleted, but the delete operation has not yet completed. -class AllocationState(Enum): +class AllocationState(str, Enum): - steady = "Steady" - resizing = "Resizing" - stopping = "Stopping" + steady = "Steady" #: The pool is not resizing. There are no changes to the number of nodes in the pool in progress. A pool enters this state when it is created and when no operations are being performed on the pool to change the number of nodes. + resizing = "Resizing" #: The pool is resizing; that is, compute nodes are being added to or removed from the pool. + stopping = "Stopping" #: The pool was resizing, but the user has requested that the resize be stopped, but the stop request has not yet been completed. -class CachingType(Enum): +class CachingType(str, Enum): - none = "None" - read_only = "ReadOnly" - read_write = "ReadWrite" + none = "None" #: The caching mode for the disk is not enabled. + read_only = "ReadOnly" #: The caching mode for the disk is read only. + read_write = "ReadWrite" #: The caching mode for the disk is read and write. -class StorageAccountType(Enum): +class StorageAccountType(str, Enum): - standard_lrs = "Standard_LRS" - premium_lrs = "Premium_LRS" + standard_lrs = "Standard_LRS" #: The data disk should use standard locally redundant storage. + premium_lrs = "Premium_LRS" #: The data disk should use premium locally redundant storage. -class ComputeNodeDeallocationOption(Enum): +class ComputeNodeDeallocationOption(str, Enum): - requeue = "Requeue" - terminate = "Terminate" - task_completion = "TaskCompletion" - retained_data = "RetainedData" + requeue = "Requeue" #: Terminate running task processes and requeue the tasks. The tasks will run again when a node is available. Remove nodes as soon as tasks have been terminated. + terminate = "Terminate" #: Terminate running tasks. The tasks will be completed with failureInfo indicating that they were terminated, and will not run again. Remove nodes as soon as tasks have been terminated. + task_completion = "TaskCompletion" #: Allow currently running tasks to complete. Schedule no new tasks while waiting. Remove nodes when all tasks have completed. 
+ retained_data = "RetainedData" #: Allow currently running tasks to complete, then wait for all task data retention periods to expire. Schedule no new tasks while waiting. Remove nodes when all task retention periods have expired. -class InterNodeCommunicationState(Enum): +class InterNodeCommunicationState(str, Enum): - enabled = "Enabled" - disabled = "Disabled" + enabled = "Enabled" #: Enable network communication between virtual machines. + disabled = "Disabled" #: Disable network communication between virtual machines. -class InboundEndpointProtocol(Enum): +class InboundEndpointProtocol(str, Enum): - tcp = "TCP" - udp = "UDP" + tcp = "TCP" #: Use TCP for the endpoint. + udp = "UDP" #: Use UDP for the endpoint. -class NetworkSecurityGroupRuleAccess(Enum): +class NetworkSecurityGroupRuleAccess(str, Enum): - allow = "Allow" - deny = "Deny" + allow = "Allow" #: Allow access. + deny = "Deny" #: Deny access. -class ComputeNodeFillType(Enum): +class ComputeNodeFillType(str, Enum): - spread = "Spread" - pack = "Pack" + spread = "Spread" #: Tasks should be assigned evenly across all nodes in the pool. + pack = "Pack" #: As many tasks as possible (maxTasksPerNode) should be assigned to each node in the pool before any tasks are assigned to the next node in the pool. -class ElevationLevel(Enum): +class ElevationLevel(str, Enum): - non_admin = "NonAdmin" - admin = "Admin" + non_admin = "NonAdmin" #: The user is a standard user without elevated access. + admin = "Admin" #: The user is a user with elevated access and operates with full Administrator permissions. -class LoginMode(Enum): +class LoginMode(str, Enum): - batch = "Batch" - interactive = "Interactive" + batch = "Batch" #: The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. + interactive = "Interactive" #: The LOGON32_LOGON_INTERACTIVE Win32 login mode. Some applications require having permissions associated with the interactive login mode. If this is the case for an application used in your task, then this option is recommended. -class AutoUserScope(Enum): +class AutoUserScope(str, Enum): - task = "Task" - pool = "Pool" + task = "Task" #: Specifies that the service should create a new user for the task. + pool = "Pool" #: Specifies that the task runs as the common auto user account which is created on every node in a pool. -class CertificateStoreLocation(Enum): +class CertificateStoreLocation(str, Enum): - current_user = "CurrentUser" - local_machine = "LocalMachine" + current_user = "CurrentUser" #: Certificates should be installed to the CurrentUser certificate store. + local_machine = "LocalMachine" #: Certificates should be installed to the LocalMachine certificate store. -class CertificateVisibility(Enum): +class CertificateVisibility(str, Enum): - start_task = "StartTask" - task = "Task" - remote_user = "RemoteUser" + start_task = "StartTask" #: The certificate should be visible to the user account under which the start task is run. + task = "Task" #: The certificate should be visible to the user accounts under which job tasks are run. + remote_user = "RemoteUser" #: The certificate should be visible to the user accounts under which users remotely access the node. -class NameAvailabilityReason(Enum): +class NameAvailabilityReason(str, Enum): - invalid = "Invalid" - already_exists = "AlreadyExists" + invalid = "Invalid" #: The requested name is invalid. + already_exists = "AlreadyExists" #: The requested name is already in use. 
diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models.py index c64ee92d4c21..4b01ce194f4f 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models.py @@ -16,7 +16,10 @@ class ActivateApplicationPackageParameters(Model): """Parameters for an activating an application package. - :param format: The format of the application package binary file. + All required parameters must be populated in order to send to Azure. + + :param format: Required. The format of the application package binary + file. :type format: str """ @@ -28,17 +31,170 @@ class ActivateApplicationPackageParameters(Model): 'format': {'key': 'format', 'type': 'str'}, } - def __init__(self, format): - super(ActivateApplicationPackageParameters, self).__init__() - self.format = format + def __init__(self, **kwargs): + super(ActivateApplicationPackageParameters, self).__init__(**kwargs) + self.format = kwargs.get('format', None) + + +class ProxyResource(Model): + """A definition of an Azure resource. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ProxyResource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.etag = None + + +class Application(ProxyResource): + """Contains information about an application in a Batch account. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param display_name: The display name for the application. + :type display_name: str + :param allow_updates: A value indicating whether packages within the + application may be overwritten using the same version string. + :type allow_updates: bool + :param default_version: The package to use if a client requests the + application but does not specify a version. This property can only be set + to the name of an existing package. 
+ :type default_version: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'display_name': {'key': 'properties.displayName', 'type': 'str'}, + 'allow_updates': {'key': 'properties.allowUpdates', 'type': 'bool'}, + 'default_version': {'key': 'properties.defaultVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Application, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + self.allow_updates = kwargs.get('allow_updates', None) + self.default_version = kwargs.get('default_version', None) + + +class ApplicationPackage(ProxyResource): + """An application package which represents a particular version of an + application. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :ivar state: The current state of the application package. Possible values + include: 'Pending', 'Active' + :vartype state: str or ~azure.mgmt.batch.models.PackageState + :ivar format: The format of the application package, if the package is + active. + :vartype format: str + :ivar storage_url: The URL for the application package in Azure Storage. + :vartype storage_url: str + :ivar storage_url_expiry: The UTC time at which the Azure Storage URL will + expire. + :vartype storage_url_expiry: datetime + :ivar last_activation_time: The time at which the package was last + activated, if the package is active. + :vartype last_activation_time: datetime + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'state': {'readonly': True}, + 'format': {'readonly': True}, + 'storage_url': {'readonly': True}, + 'storage_url_expiry': {'readonly': True}, + 'last_activation_time': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'state': {'key': 'properties.state', 'type': 'PackageState'}, + 'format': {'key': 'properties.format', 'type': 'str'}, + 'storage_url': {'key': 'properties.storageUrl', 'type': 'str'}, + 'storage_url_expiry': {'key': 'properties.storageUrlExpiry', 'type': 'iso-8601'}, + 'last_activation_time': {'key': 'properties.lastActivationTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ApplicationPackage, self).__init__(**kwargs) + self.state = None + self.format = None + self.storage_url = None + self.storage_url_expiry = None + self.last_activation_time = None class ApplicationPackageReference(Model): """Link to an application package inside the batch account. - :param id: The ID of the application package to install. This must be - inside the same batch account as the pool. This can either be a reference - to a specific version or the default version if one exists. + All required parameters must be populated in order to send to Azure. + + :param id: Required. The ID of the application package to install. 
This + must be inside the same batch account as the pool. This can either be a + reference to a specific version or the default version if one exists. :type id: str :param version: The version of the application to deploy. If omitted, the default version is deployed. If this is omitted, and no default version is @@ -57,17 +213,19 @@ class ApplicationPackageReference(Model): 'version': {'key': 'version', 'type': 'str'}, } - def __init__(self, id, version=None): - super(ApplicationPackageReference, self).__init__() - self.id = id - self.version = version + def __init__(self, **kwargs): + super(ApplicationPackageReference, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.version = kwargs.get('version', None) class AutoScaleRun(Model): """The results and errors from an execution of a pool autoscale formula. - :param evaluation_time: The time at which the autoscale formula was last - evaluated. + All required parameters must be populated in order to send to Azure. + + :param evaluation_time: Required. The time at which the autoscale formula + was last evaluated. :type evaluation_time: datetime :param results: The final values of all variables used in the evaluation of the autoscale formula. Each variable value is returned in the form @@ -88,21 +246,23 @@ class AutoScaleRun(Model): 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, } - def __init__(self, evaluation_time, results=None, error=None): - super(AutoScaleRun, self).__init__() - self.evaluation_time = evaluation_time - self.results = results - self.error = error + def __init__(self, **kwargs): + super(AutoScaleRun, self).__init__(**kwargs) + self.evaluation_time = kwargs.get('evaluation_time', None) + self.results = kwargs.get('results', None) + self.error = kwargs.get('error', None) class AutoScaleRunError(Model): """An error that occurred when autoscaling a pool. - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. :type message: str :param details: Additional details about the error. :type details: list[~azure.mgmt.batch.models.AutoScaleRunError] @@ -119,18 +279,20 @@ class AutoScaleRunError(Model): 'details': {'key': 'details', 'type': '[AutoScaleRunError]'}, } - def __init__(self, code, message, details=None): - super(AutoScaleRunError, self).__init__() - self.code = code - self.message = message - self.details = details + def __init__(self, **kwargs): + super(AutoScaleRunError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) class AutoScaleSettings(Model): """AutoScale settings for the pool. - :param formula: A formula for the desired number of compute nodes in the - pool. + All required parameters must be populated in order to send to Azure. + + :param formula: Required. A formula for the desired number of compute + nodes in the pool. :type formula: str :param evaluation_interval: The time interval at which to automatically adjust the pool size according to the autoscale formula. 
If omitted, the @@ -147,31 +309,62 @@ class AutoScaleSettings(Model): 'evaluation_interval': {'key': 'evaluationInterval', 'type': 'duration'}, } - def __init__(self, formula, evaluation_interval=None): - super(AutoScaleSettings, self).__init__() - self.formula = formula - self.evaluation_interval = evaluation_interval + def __init__(self, **kwargs): + super(AutoScaleSettings, self).__init__(**kwargs) + self.formula = kwargs.get('formula', None) + self.evaluation_interval = kwargs.get('evaluation_interval', None) class AutoStorageBaseProperties(Model): """The properties related to the auto-storage account. - :param storage_account_id: The resource ID of the storage account to be - used for auto-storage account. + All required parameters must be populated in order to send to Azure. + + :param storage_account_id: Required. The resource ID of the storage + account to be used for auto-storage account. + :type storage_account_id: str + """ + + _validation = { + 'storage_account_id': {'required': True}, + } + + _attribute_map = { + 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AutoStorageBaseProperties, self).__init__(**kwargs) + self.storage_account_id = kwargs.get('storage_account_id', None) + + +class AutoStorageProperties(AutoStorageBaseProperties): + """Contains information about the auto-storage account associated with a Batch + account. + + All required parameters must be populated in order to send to Azure. + + :param storage_account_id: Required. The resource ID of the storage + account to be used for auto-storage account. :type storage_account_id: str + :param last_key_sync: Required. The UTC time at which storage keys were + last synchronized with the Batch account. + :type last_key_sync: datetime """ _validation = { 'storage_account_id': {'required': True}, + 'last_key_sync': {'required': True}, } _attribute_map = { 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, + 'last_key_sync': {'key': 'lastKeySync', 'type': 'iso-8601'}, } - def __init__(self, storage_account_id): - super(AutoStorageBaseProperties, self).__init__() - self.storage_account_id = storage_account_id + def __init__(self, **kwargs): + super(AutoStorageProperties, self).__init__(**kwargs) + self.last_key_sync = kwargs.get('last_key_sync', None) class AutoUserSpecification(Model): @@ -194,94 +387,261 @@ class AutoUserSpecification(Model): 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, } - def __init__(self, scope=None, elevation_level=None): - super(AutoUserSpecification, self).__init__() - self.scope = scope - self.elevation_level = elevation_level + def __init__(self, **kwargs): + super(AutoUserSpecification, self).__init__(**kwargs) + self.scope = kwargs.get('scope', None) + self.elevation_level = kwargs.get('elevation_level', None) -class BatchAccountCreateParameters(Model): - """Parameters supplied to the Create operation. +class Resource(Model): + """A definition of an Azure resource. - :param location: The region in which to create the account. - :type location: str - :param tags: The user-specified tags associated with the account. - :type tags: dict[str, str] - :param auto_storage: The properties related to the auto-storage account. - :type auto_storage: ~azure.mgmt.batch.models.AutoStorageBaseProperties - :param pool_allocation_mode: The allocation mode to use for creating pools - in the Batch account. The pool allocation mode also affects how clients - may authenticate to the Batch Service API. 
If the mode is BatchService, - clients may authenticate using access keys or Azure Active Directory. If - the mode is UserSubscription, clients must use Azure Active Directory. The - default is BatchService. Possible values include: 'BatchService', - 'UserSubscription' - :type pool_allocation_mode: str or - ~azure.mgmt.batch.models.PoolAllocationMode - :param key_vault_reference: A reference to the Azure key vault associated - with the Batch account. - :type key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar location: The location of the resource. + :vartype location: str + :ivar tags: The tags of the resource. + :vartype tags: dict[str, str] """ _validation = { - 'location': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'tags': {'readonly': True}, } _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, - 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, - 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, - 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, } - def __init__(self, location, tags=None, auto_storage=None, pool_allocation_mode=None, key_vault_reference=None): - super(BatchAccountCreateParameters, self).__init__() - self.location = location - self.tags = tags - self.auto_storage = auto_storage - self.pool_allocation_mode = pool_allocation_mode - self.key_vault_reference = key_vault_reference + def __init__(self, **kwargs): + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.location = None + self.tags = None -class BatchAccountKeys(Model): - """A set of Azure Batch account keys. +class BatchAccount(Resource): + """Contains information about an Azure Batch account. Variables are only populated by the server, and will be ignored when sending a request. - :ivar account_name: The Batch account name. - :vartype account_name: str - :ivar primary: The primary key associated with the account. - :vartype primary: str - :ivar secondary: The secondary key associated with the account. - :vartype secondary: str - """ - - _validation = { - 'account_name': {'readonly': True}, - 'primary': {'readonly': True}, - 'secondary': {'readonly': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'primary': {'key': 'primary', 'type': 'str'}, - 'secondary': {'key': 'secondary', 'type': 'str'}, - } - - def __init__(self): - super(BatchAccountKeys, self).__init__() - self.account_name = None - self.primary = None - self.secondary = None - - -class BatchAccountRegenerateKeyParameters(Model): + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar location: The location of the resource. + :vartype location: str + :ivar tags: The tags of the resource. 
+ :vartype tags: dict[str, str] + :ivar account_endpoint: The account endpoint used to interact with the + Batch service. + :vartype account_endpoint: str + :ivar provisioning_state: The provisioned state of the resource. Possible + values include: 'Invalid', 'Creating', 'Deleting', 'Succeeded', 'Failed', + 'Cancelled' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.ProvisioningState + :ivar pool_allocation_mode: The allocation mode to use for creating pools + in the Batch account. Possible values include: 'BatchService', + 'UserSubscription' + :vartype pool_allocation_mode: str or + ~azure.mgmt.batch.models.PoolAllocationMode + :ivar key_vault_reference: A reference to the Azure key vault associated + with the Batch account. + :vartype key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + :ivar auto_storage: The properties and status of any auto-storage account + associated with the Batch account. + :vartype auto_storage: ~azure.mgmt.batch.models.AutoStorageProperties + :ivar dedicated_core_quota: The dedicated core quota for the Batch + account. For accounts with PoolAllocationMode set to UserSubscription, + quota is managed on the subscription so this value is not returned. + :vartype dedicated_core_quota: int + :ivar low_priority_core_quota: The low-priority core quota for the Batch + account. For accounts with PoolAllocationMode set to UserSubscription, + quota is managed on the subscription so this value is not returned. + :vartype low_priority_core_quota: int + :ivar dedicated_core_quota_per_vm_family: A list of the dedicated core + quota per Virtual Machine family for the Batch account. For accounts with + PoolAllocationMode set to UserSubscription, quota is managed on the + subscription so this value is not returned. + :vartype dedicated_core_quota_per_vm_family: + list[~azure.mgmt.batch.models.VirtualMachineFamilyCoreQuota] + :ivar dedicated_core_quota_per_vm_family_enforced: A value indicating + whether the core quota for the Batch Account is enforced per Virtual + Machine family or not. Batch is transitioning its core quota system for + dedicated cores to be enforced per Virtual Machine family. During this + transitional phase, the dedicated core quota per Virtual Machine family + may not yet be enforced. If this flag is false, dedicated core quota is + enforced via the old dedicatedCoreQuota property on the account and does + not consider Virtual Machine family. If this flag is true, dedicated core + quota is enforced via the dedicatedCoreQuotaPerVMFamily property on the + account, and the old dedicatedCoreQuota does not apply. + :vartype dedicated_core_quota_per_vm_family_enforced: bool + :ivar pool_quota: The pool quota for the Batch account. + :vartype pool_quota: int + :ivar active_job_and_job_schedule_quota: The active job and job schedule + quota for the Batch account. 
+ :vartype active_job_and_job_schedule_quota: int + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'tags': {'readonly': True}, + 'account_endpoint': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'pool_allocation_mode': {'readonly': True}, + 'key_vault_reference': {'readonly': True}, + 'auto_storage': {'readonly': True}, + 'dedicated_core_quota': {'readonly': True}, + 'low_priority_core_quota': {'readonly': True}, + 'dedicated_core_quota_per_vm_family': {'readonly': True}, + 'dedicated_core_quota_per_vm_family_enforced': {'readonly': True}, + 'pool_quota': {'readonly': True}, + 'active_job_and_job_schedule_quota': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'account_endpoint': {'key': 'properties.accountEndpoint', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'}, + 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, + 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, + 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageProperties'}, + 'dedicated_core_quota': {'key': 'properties.dedicatedCoreQuota', 'type': 'int'}, + 'low_priority_core_quota': {'key': 'properties.lowPriorityCoreQuota', 'type': 'int'}, + 'dedicated_core_quota_per_vm_family': {'key': 'properties.dedicatedCoreQuotaPerVMFamily', 'type': '[VirtualMachineFamilyCoreQuota]'}, + 'dedicated_core_quota_per_vm_family_enforced': {'key': 'properties.dedicatedCoreQuotaPerVMFamilyEnforced', 'type': 'bool'}, + 'pool_quota': {'key': 'properties.poolQuota', 'type': 'int'}, + 'active_job_and_job_schedule_quota': {'key': 'properties.activeJobAndJobScheduleQuota', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(BatchAccount, self).__init__(**kwargs) + self.account_endpoint = None + self.provisioning_state = None + self.pool_allocation_mode = None + self.key_vault_reference = None + self.auto_storage = None + self.dedicated_core_quota = None + self.low_priority_core_quota = None + self.dedicated_core_quota_per_vm_family = None + self.dedicated_core_quota_per_vm_family_enforced = None + self.pool_quota = None + self.active_job_and_job_schedule_quota = None + + +class BatchAccountCreateParameters(Model): + """Parameters supplied to the Create operation. + + All required parameters must be populated in order to send to Azure. + + :param location: Required. The region in which to create the account. + :type location: str + :param tags: The user-specified tags associated with the account. + :type tags: dict[str, str] + :param auto_storage: The properties related to the auto-storage account. + :type auto_storage: ~azure.mgmt.batch.models.AutoStorageBaseProperties + :param pool_allocation_mode: The allocation mode to use for creating pools + in the Batch account. The pool allocation mode also affects how clients + may authenticate to the Batch Service API. If the mode is BatchService, + clients may authenticate using access keys or Azure Active Directory. If + the mode is UserSubscription, clients must use Azure Active Directory. The + default is BatchService. 
Possible values include: 'BatchService', + 'UserSubscription' + :type pool_allocation_mode: str or + ~azure.mgmt.batch.models.PoolAllocationMode + :param key_vault_reference: A reference to the Azure key vault associated + with the Batch account. + :type key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + """ + + _validation = { + 'location': {'required': True}, + } + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, + 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, + 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, + } + + def __init__(self, **kwargs): + super(BatchAccountCreateParameters, self).__init__(**kwargs) + self.location = kwargs.get('location', None) + self.tags = kwargs.get('tags', None) + self.auto_storage = kwargs.get('auto_storage', None) + self.pool_allocation_mode = kwargs.get('pool_allocation_mode', None) + self.key_vault_reference = kwargs.get('key_vault_reference', None) + + +class BatchAccountKeys(Model): + """A set of Azure Batch account keys. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar account_name: The Batch account name. + :vartype account_name: str + :ivar primary: The primary key associated with the account. + :vartype primary: str + :ivar secondary: The secondary key associated with the account. + :vartype secondary: str + """ + + _validation = { + 'account_name': {'readonly': True}, + 'primary': {'readonly': True}, + 'secondary': {'readonly': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'primary': {'key': 'primary', 'type': 'str'}, + 'secondary': {'key': 'secondary', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BatchAccountKeys, self).__init__(**kwargs) + self.account_name = None + self.primary = None + self.secondary = None + + +class BatchAccountRegenerateKeyParameters(Model): """Parameters supplied to the RegenerateKey operation. - :param key_name: The type of account key to regenerate. Possible values - include: 'Primary', 'Secondary' + All required parameters must be populated in order to send to Azure. + + :param key_name: Required. The type of account key to regenerate. 
Possible + values include: 'Primary', 'Secondary' :type key_name: str or ~azure.mgmt.batch.models.AccountKeyType """ @@ -293,9 +653,9 @@ class BatchAccountRegenerateKeyParameters(Model): 'key_name': {'key': 'keyName', 'type': 'AccountKeyType'}, } - def __init__(self, key_name): - super(BatchAccountRegenerateKeyParameters, self).__init__() - self.key_name = key_name + def __init__(self, **kwargs): + super(BatchAccountRegenerateKeyParameters, self).__init__(**kwargs) + self.key_name = kwargs.get('key_name', None) class BatchAccountUpdateParameters(Model): @@ -312,10 +672,10 @@ class BatchAccountUpdateParameters(Model): 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, } - def __init__(self, tags=None, auto_storage=None): - super(BatchAccountUpdateParameters, self).__init__() - self.tags = tags - self.auto_storage = auto_storage + def __init__(self, **kwargs): + super(BatchAccountUpdateParameters, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.auto_storage = kwargs.get('auto_storage', None) class BatchLocationQuota(Model): @@ -337,11 +697,100 @@ class BatchLocationQuota(Model): 'account_quota': {'key': 'accountQuota', 'type': 'int'}, } - def __init__(self): - super(BatchLocationQuota, self).__init__() + def __init__(self, **kwargs): + super(BatchLocationQuota, self).__init__(**kwargs) self.account_quota = None +class Certificate(ProxyResource): + """Contains information about a certificate. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param thumbprint_algorithm: The algorithm of the certificate thumbprint. + This must match the first portion of the certificate name. Currently + required to be 'SHA1'. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate. This must match the + thumbprint from the name. + :type thumbprint: str + :param format: The format of the certificate - either Pfx or Cer. If + omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' + :type format: str or ~azure.mgmt.batch.models.CertificateFormat + :ivar provisioning_state: The provisioned state of the resource. Possible + values include: 'Succeeded', 'Deleting', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.CertificateProvisioningState + :ivar provisioning_state_transition_time: The time at which the + certificate entered its current state. + :vartype provisioning_state_transition_time: datetime + :ivar previous_provisioning_state: The previous provisioned state of the + resource. Possible values include: 'Succeeded', 'Deleting', 'Failed' + :vartype previous_provisioning_state: str or + ~azure.mgmt.batch.models.CertificateProvisioningState + :ivar previous_provisioning_state_transition_time: The time at which the + certificate entered its previous state. + :vartype previous_provisioning_state_transition_time: datetime + :ivar public_data: The public key of the certificate. + :vartype public_data: str + :ivar delete_certificate_error: The error which occurred while deleting + the certificate. This is only returned when the certificate + provisioningState is 'Failed'. 
+ :vartype delete_certificate_error: + ~azure.mgmt.batch.models.DeleteCertificateError + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'provisioning_state_transition_time': {'readonly': True}, + 'previous_provisioning_state': {'readonly': True}, + 'previous_provisioning_state_transition_time': {'readonly': True}, + 'public_data': {'readonly': True}, + 'delete_certificate_error': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, + 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, + 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'CertificateProvisioningState'}, + 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, + 'previous_provisioning_state': {'key': 'properties.previousProvisioningState', 'type': 'CertificateProvisioningState'}, + 'previous_provisioning_state_transition_time': {'key': 'properties.previousProvisioningStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'properties.publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'properties.deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, **kwargs): + super(Certificate, self).__init__(**kwargs) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.thumbprint = kwargs.get('thumbprint', None) + self.format = kwargs.get('format', None) + self.provisioning_state = None + self.provisioning_state_transition_time = None + self.previous_provisioning_state = None + self.previous_provisioning_state_transition_time = None + self.public_data = None + self.delete_certificate_error = None + + class CertificateBaseProperties(Model): """CertificateBaseProperties. @@ -363,35 +812,101 @@ class CertificateBaseProperties(Model): 'format': {'key': 'format', 'type': 'CertificateFormat'}, } - def __init__(self, thumbprint_algorithm=None, thumbprint=None, format=None): - super(CertificateBaseProperties, self).__init__() - self.thumbprint_algorithm = thumbprint_algorithm - self.thumbprint = thumbprint - self.format = format + def __init__(self, **kwargs): + super(CertificateBaseProperties, self).__init__(**kwargs) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.thumbprint = kwargs.get('thumbprint', None) + self.format = kwargs.get('format', None) -class CertificateReference(Model): - """A reference to a certificate to be installed on compute nodes in a pool. - This must exist inside the same account as the pool. +class CertificateCreateOrUpdateParameters(ProxyResource): + """Contains information about a certificate. - :param id: The fully qualified ID of the certificate to install on the - pool. This must be inside the same batch account as the pool. - :type id: str - :param store_location: The location of the certificate store on the - compute node into which to install the certificate. The default value is - currentUser. 
This property is applicable only for pools configured with - Windows nodes (that is, created with cloudServiceConfiguration, or with - virtualMachineConfiguration using a Windows image reference). For Linux - compute nodes, the certificates are stored in a directory inside the task - working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is - supplied to the task to query for this location. For certificates with - visibility of 'remoteUser', a 'certs' directory is created in the user's - home directory (e.g., /home/{user-name}/certs) and certificates are placed - in that directory. Possible values include: 'CurrentUser', 'LocalMachine' - :type store_location: str or - ~azure.mgmt.batch.models.CertificateStoreLocation - :param store_name: The name of the certificate store on the compute node - into which to install the certificate. This property is applicable only + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param thumbprint_algorithm: The algorithm of the certificate thumbprint. + This must match the first portion of the certificate name. Currently + required to be 'SHA1'. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate. This must match the + thumbprint from the name. + :type thumbprint: str + :param format: The format of the certificate - either Pfx or Cer. If + omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' + :type format: str or ~azure.mgmt.batch.models.CertificateFormat + :param data: Required. The base64-encoded contents of the certificate. The + maximum size is 10KB. + :type data: str + :param password: The password to access the certificate's private key. + This is required if the certificate format is pfx and must be omitted if + the certificate format is cer. + :type password: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, + 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, + 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, + 'data': {'key': 'properties.data', 'type': 'str'}, + 'password': {'key': 'properties.password', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(CertificateCreateOrUpdateParameters, self).__init__(**kwargs) + self.thumbprint_algorithm = kwargs.get('thumbprint_algorithm', None) + self.thumbprint = kwargs.get('thumbprint', None) + self.format = kwargs.get('format', None) + self.data = kwargs.get('data', None) + self.password = kwargs.get('password', None) + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + This must exist inside the same account as the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. 
The fully qualified ID of the certificate to install + on the pool. This must be inside the same batch account as the pool. + :type id: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentUser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'CurrentUser', 'LocalMachine' + :type store_location: str or + ~azure.mgmt.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, @@ -415,12 +930,12 @@ class CertificateReference(Model): 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, } - def __init__(self, id, store_location=None, store_name=None, visibility=None): - super(CertificateReference, self).__init__() - self.id = id - self.store_location = store_location - self.store_name = store_name - self.visibility = visibility + def __init__(self, **kwargs): + super(CertificateReference, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.store_location = kwargs.get('store_location', None) + self.store_name = kwargs.get('store_name', None) + self.visibility = kwargs.get('visibility', None) class CheckNameAvailabilityParameters(Model): @@ -429,9 +944,11 @@ class CheckNameAvailabilityParameters(Model): Variables are only populated by the server, and will be ignored when sending a request. - :param name: The name to check for availability + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name to check for availability :type name: str - :ivar type: The resource type. Must be set to + :ivar type: Required. The resource type. Must be set to Microsoft.Batch/batchAccounts. Default value: "Microsoft.Batch/batchAccounts" . :vartype type: str @@ -449,9 +966,9 @@ class CheckNameAvailabilityParameters(Model): type = "Microsoft.Batch/batchAccounts" - def __init__(self, name): - super(CheckNameAvailabilityParameters, self).__init__() - self.name = name + def __init__(self, **kwargs): + super(CheckNameAvailabilityParameters, self).__init__(**kwargs) + self.name = kwargs.get('name', None) class CheckNameAvailabilityResult(Model): @@ -485,23 +1002,85 @@ class CheckNameAvailabilityResult(Model): 'message': {'key': 'message', 'type': 'str'}, } - def __init__(self): - super(CheckNameAvailabilityResult, self).__init__() + def __init__(self, **kwargs): + super(CheckNameAvailabilityResult, self).__init__(**kwargs) self.name_available = None self.reason = None self.message = None +class CloudError(Model): + """An error response from the Batch service. 
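Illustrative sketch, not part of the generated diff: the regenerated models above accept only keyword arguments, and fields documented as "Required." are now enforced by the _validation map when msrest serializes the request rather than by the Python constructor signature. The resource ID below is a placeholder.

from azure.mgmt.batch import models

# All values are now keyword arguments; a positional call such as
# models.CertificateReference('<certificate-resource-id>') raises TypeError.
cert_ref = models.CertificateReference(
    id='<certificate-resource-id>',   # placeholder resource ID
    store_location='CurrentUser',
)

# Fields documented as "Required." are no longer checked at construction time;
# msrest's _validation map should reject a missing value during serialization.
name_check = models.CheckNameAvailabilityParameters(name='mybatchaccount')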
+ + :param error: + :type error: ~azure.mgmt.batch.models.CloudErrorBody + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'CloudErrorBody'}, + } + + def __init__(self, **kwargs): + super(CloudError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) + + +class CloudErrorException(HttpOperationError): + """Server responsed with exception of type: 'CloudError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args) + + +class CloudErrorBody(Model): + """An error response from the Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: str + :param target: The target of the particular error. For example, the name + of the property in error. + :type target: str + :param details: A list of additional details about the error. + :type details: list[~azure.mgmt.batch.models.CloudErrorBody] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + } + + def __init__(self, **kwargs): + super(CloudErrorBody, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.target = kwargs.get('target', None) + self.details = kwargs.get('details', None) + + class CloudServiceConfiguration(Model): """The configuration for nodes in a pool based on the Azure Cloud Services platform. - :param os_family: The Azure Guest OS family to be installed on the virtual - machines in the pool. Possible values are: 2 - OS Family 2, equivalent to - Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server - 2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family - 5, equivalent to Windows Server 2016. For more information, see Azure - Guest OS Releases + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. The Azure Guest OS family to be installed on + the virtual machines in the pool. Possible values are: 2 - OS Family 2, + equivalent to Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to + Windows Server 2012. 4 - OS Family 4, equivalent to Windows Server 2012 + R2. 5 - OS Family 5, equivalent to Windows Server 2016. 6 - OS Family 6, + equivalent to Windows Server 2019. For more information, see Azure Guest + OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). 
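A hedged usage sketch rather than generated code: the new CloudError, CloudErrorBody, and CloudErrorException types suggest that service errors surface as models.CloudErrorException; the operations-group call inside the helper is an assumption and is not shown in this diff.

from azure.mgmt.batch import models


def get_account_or_report(client, resource_group_name, account_name):
    # 'client' is assumed to be a BatchManagementClient; the get() call below is an
    # assumption based on earlier versions of the batch_account operations group.
    try:
        return client.batch_account.get(resource_group_name, account_name)
    except models.CloudErrorException as exc:
        # exc.error should carry the deserialized CloudError; its .error attribute
        # is the CloudErrorBody defined above.
        body = getattr(exc.error, 'error', None)
        print('Batch request failed:',
              getattr(body, 'code', None),
              getattr(body, 'message', str(exc)))
        return None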
:type os_family: str :param os_version: The Azure Guest OS version to be installed on the @@ -519,10 +1098,10 @@ class CloudServiceConfiguration(Model): 'os_version': {'key': 'osVersion', 'type': 'str'}, } - def __init__(self, os_family, os_version=None): - super(CloudServiceConfiguration, self).__init__() - self.os_family = os_family - self.os_version = os_version + def __init__(self, **kwargs): + super(CloudServiceConfiguration, self).__init__(**kwargs) + self.os_family = kwargs.get('os_family', None) + self.os_version = kwargs.get('os_version', None) class ContainerConfiguration(Model): @@ -531,7 +1110,9 @@ class ContainerConfiguration(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar type: The container technology to be used. Default value: + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: "DockerCompatible" . :vartype type: str :param container_image_names: The collection of container image names. @@ -559,21 +1140,23 @@ class ContainerConfiguration(Model): type = "DockerCompatible" - def __init__(self, container_image_names=None, container_registries=None): - super(ContainerConfiguration, self).__init__() - self.container_image_names = container_image_names - self.container_registries = container_registries + def __init__(self, **kwargs): + super(ContainerConfiguration, self).__init__(**kwargs) + self.container_image_names = kwargs.get('container_image_names', None) + self.container_registries = kwargs.get('container_registries', None) class ContainerRegistry(Model): """A private container registry. + All required parameters must be populated in order to send to Azure. + :param registry_server: The registry URL. If omitted, the default is "docker.io". :type registry_server: str - :param user_name: The user name to log into the registry server. + :param user_name: Required. The user name to log into the registry server. :type user_name: str - :param password: The password to log into the registry server. + :param password: Required. The password to log into the registry server. :type password: str """ @@ -588,20 +1171,22 @@ class ContainerRegistry(Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__(self, user_name, password, registry_server=None): - super(ContainerRegistry, self).__init__() - self.registry_server = registry_server - self.user_name = user_name - self.password = password + def __init__(self, **kwargs): + super(ContainerRegistry, self).__init__(**kwargs) + self.registry_server = kwargs.get('registry_server', None) + self.user_name = kwargs.get('user_name', None) + self.password = kwargs.get('password', None) class DataDisk(Model): """Data Disk settings which will be used by the data disks associated to Compute Nodes in the pool. - :param lun: The logical unit number. The lun is used to uniquely identify - each data disk. If attaching multiple disks, each should have a distinct - lun. + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. :type lun: int :param caching: The type of caching to be enabled for the data disks. Values are: @@ -613,8 +1198,8 @@ class DataDisk(Model): https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
Possible values include: 'None', 'ReadOnly', 'ReadWrite' :type caching: str or ~azure.mgmt.batch.models.CachingType - :param disk_size_gb: The initial disk size in GB when creating new data - disk. + :param disk_size_gb: Required. The initial disk size in GB when creating + new data disk. :type disk_size_gb: int :param storage_account_type: The storage account type to be used for the data disk. If omitted, the default is "Standard_LRS". Values are: @@ -638,22 +1223,24 @@ class DataDisk(Model): 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, } - def __init__(self, lun, disk_size_gb, caching=None, storage_account_type=None): - super(DataDisk, self).__init__() - self.lun = lun - self.caching = caching - self.disk_size_gb = disk_size_gb - self.storage_account_type = storage_account_type + def __init__(self, **kwargs): + super(DataDisk, self).__init__(**kwargs) + self.lun = kwargs.get('lun', None) + self.caching = kwargs.get('caching', None) + self.disk_size_gb = kwargs.get('disk_size_gb', None) + self.storage_account_type = kwargs.get('storage_account_type', None) class DeleteCertificateError(Model): """An error response from the Batch service. - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. :type message: str :param target: The target of the particular error. For example, the name of the property in error. @@ -674,12 +1261,12 @@ class DeleteCertificateError(Model): 'details': {'key': 'details', 'type': '[DeleteCertificateError]'}, } - def __init__(self, code, message, target=None, details=None): - super(DeleteCertificateError, self).__init__() - self.code = code - self.message = message - self.target = target - self.details = details + def __init__(self, **kwargs): + super(DeleteCertificateError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.target = kwargs.get('target', None) + self.details = kwargs.get('details', None) class DeploymentConfiguration(Model): @@ -704,16 +1291,18 @@ class DeploymentConfiguration(Model): 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, } - def __init__(self, cloud_service_configuration=None, virtual_machine_configuration=None): - super(DeploymentConfiguration, self).__init__() - self.cloud_service_configuration = cloud_service_configuration - self.virtual_machine_configuration = virtual_machine_configuration + def __init__(self, **kwargs): + super(DeploymentConfiguration, self).__init__(**kwargs) + self.cloud_service_configuration = kwargs.get('cloud_service_configuration', None) + self.virtual_machine_configuration = kwargs.get('virtual_machine_configuration', None) class EnvironmentSetting(Model): """An environment variable to be set on a task process. - :param name: The name of the environment variable. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the environment variable. 
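For illustration only, not in the diff: the documentation change above adds OS family '6' (Windows Server 2019) as an accepted value; the deployment models are built with keyword arguments like everything else in this regeneration.

from azure.mgmt.batch import models

# Cloud Services (PaaS) deployment path; os_family '6' maps to Windows Server 2019
# per the updated docstring above.
deployment = models.DeploymentConfiguration(
    cloud_service_configuration=models.CloudServiceConfiguration(os_family='6'),
)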
:type name: str :param value: The value of the environment variable. :type value: str @@ -728,10 +1317,10 @@ class EnvironmentSetting(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, name, value=None): - super(EnvironmentSetting, self).__init__() - self.name = name - self.value = value + def __init__(self, **kwargs): + super(EnvironmentSetting, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) class FixedScaleSettings(Model): @@ -767,12 +1356,12 @@ class FixedScaleSettings(Model): 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, } - def __init__(self, resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None, node_deallocation_option=None): - super(FixedScaleSettings, self).__init__() - self.resize_timeout = resize_timeout - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.node_deallocation_option = node_deallocation_option + def __init__(self, **kwargs): + super(FixedScaleSettings, self).__init__(**kwargs) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) class ImageReference(Model): @@ -815,43 +1404,45 @@ class ImageReference(Model): 'id': {'key': 'id', 'type': 'str'}, } - def __init__(self, publisher=None, offer=None, sku=None, version=None, id=None): - super(ImageReference, self).__init__() - self.publisher = publisher - self.offer = offer - self.sku = sku - self.version = version - self.id = id + def __init__(self, **kwargs): + super(ImageReference, self).__init__(**kwargs) + self.publisher = kwargs.get('publisher', None) + self.offer = kwargs.get('offer', None) + self.sku = kwargs.get('sku', None) + self.version = kwargs.get('version', None) + self.id = kwargs.get('id', None) class InboundNatPool(Model): """A inbound NAT pool that can be used to address specific ports on compute nodes in a Batch pool externally. - :param name: The name of the endpoint. The name must be unique within a - Batch pool, can contain letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. :type name: str - :param protocol: The protocol of the endpoint. Possible values include: - 'TCP', 'UDP' + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'TCP', 'UDP' :type protocol: str or ~azure.mgmt.batch.models.InboundEndpointProtocol - :param backend_port: The port number on the compute node. This must be - unique within a Batch pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are reserved. If any + :param backend_port: Required. 
The port number on the compute node. This + must be unique within a Batch pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. :type backend_port: int - :param frontend_port_range_start: The first port number in the range of - external ports that will be used to provide inbound access to the + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the backendPort on individual compute nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400. :type frontend_port_range_start: int - :param frontend_port_range_end: The last port number in the range of - external ports that will be used to provide inbound access to the + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the backendPort on individual compute nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. If @@ -886,24 +1477,26 @@ class InboundNatPool(Model): 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, } - def __init__(self, name, protocol, backend_port, frontend_port_range_start, frontend_port_range_end, network_security_group_rules=None): - super(InboundNatPool, self).__init__() - self.name = name - self.protocol = protocol - self.backend_port = backend_port - self.frontend_port_range_start = frontend_port_range_start - self.frontend_port_range_end = frontend_port_range_end - self.network_security_group_rules = network_security_group_rules + def __init__(self, **kwargs): + super(InboundNatPool, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.protocol = kwargs.get('protocol', None) + self.backend_port = kwargs.get('backend_port', None) + self.frontend_port_range_start = kwargs.get('frontend_port_range_start', None) + self.frontend_port_range_end = kwargs.get('frontend_port_range_end', None) + self.network_security_group_rules = kwargs.get('network_security_group_rules', None) class KeyVaultReference(Model): """Identifies the Azure key vault associated with a Batch account. - :param id: The resource ID of the Azure key vault associated with the - Batch account. + All required parameters must be populated in order to send to Azure. + + :param id: Required. The resource ID of the Azure key vault associated + with the Batch account. :type id: str - :param url: The URL of the Azure key vault associated with the Batch - account. + :param url: Required. The URL of the Azure key vault associated with the + Batch account. 
:type url: str """ @@ -917,10 +1510,10 @@ class KeyVaultReference(Model): 'url': {'key': 'url', 'type': 'str'}, } - def __init__(self, id, url): - super(KeyVaultReference, self).__init__() - self.id = id - self.url = url + def __init__(self, **kwargs): + super(KeyVaultReference, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.url = kwargs.get('url', None) class LinuxUserConfiguration(Model): @@ -951,11 +1544,11 @@ class LinuxUserConfiguration(Model): 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, } - def __init__(self, uid=None, gid=None, ssh_private_key=None): - super(LinuxUserConfiguration, self).__init__() - self.uid = uid - self.gid = gid - self.ssh_private_key = ssh_private_key + def __init__(self, **kwargs): + super(LinuxUserConfiguration, self).__init__(**kwargs) + self.uid = kwargs.get('uid', None) + self.gid = kwargs.get('gid', None) + self.ssh_private_key = kwargs.get('ssh_private_key', None) class MetadataItem(Model): @@ -964,9 +1557,11 @@ class MetadataItem(Model): The Batch service does not assign any meaning to this metadata; it is solely for the use of user code. - :param name: The name of the metadata item. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metadata item. :type name: str - :param value: The value of the metadata item. + :param value: Required. The value of the metadata item. :type value: str """ @@ -980,10 +1575,10 @@ class MetadataItem(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, name, value): - super(MetadataItem, self).__init__() - self.name = name - self.value = value + def __init__(self, **kwargs): + super(MetadataItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) class NetworkConfiguration(Model): @@ -1027,31 +1622,34 @@ class NetworkConfiguration(Model): 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, } - def __init__(self, subnet_id=None, endpoint_configuration=None): - super(NetworkConfiguration, self).__init__() - self.subnet_id = subnet_id - self.endpoint_configuration = endpoint_configuration + def __init__(self, **kwargs): + super(NetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) + self.endpoint_configuration = kwargs.get('endpoint_configuration', None) class NetworkSecurityGroupRule(Model): """A network security group rule to apply to an inbound endpoint. - :param priority: The priority for this rule. Priorities within a pool must - be unique and are evaluated in order of priority. The lower the number the - higher the priority. For example, rules could be specified with order - numbers of 150, 250, and 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. Allowed priorities are - 150 to 3500. If any reserved or duplicate values are provided the request - fails with HTTP status code 400. + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. 
If any reserved or duplicate values are + provided the request fails with HTTP status code 400. :type priority: int - :param access: The action that should be taken for a specified IP address, - subnet range or tag. Possible values include: 'Allow', 'Deny' + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'Allow', 'Deny' :type access: str or ~azure.mgmt.batch.models.NetworkSecurityGroupRuleAccess - :param source_address_prefix: The source address prefix or tag to match - for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP - subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If - any other values are provided the request fails with HTTP status code 400. + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. :type source_address_prefix: str """ @@ -1067,11 +1665,11 @@ class NetworkSecurityGroupRule(Model): 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, } - def __init__(self, priority, access, source_address_prefix): - super(NetworkSecurityGroupRule, self).__init__() - self.priority = priority - self.access = access - self.source_address_prefix = source_address_prefix + def __init__(self, **kwargs): + super(NetworkSecurityGroupRule, self).__init__(**kwargs) + self.priority = kwargs.get('priority', None) + self.access = kwargs.get('access', None) + self.source_address_prefix = kwargs.get('source_address_prefix', None) class Operation(Model): @@ -1095,12 +1693,12 @@ class Operation(Model): 'properties': {'key': 'properties', 'type': 'object'}, } - def __init__(self, name=None, display=None, origin=None, properties=None): - super(Operation, self).__init__() - self.name = name - self.display = display - self.origin = origin - self.properties = properties + def __init__(self, **kwargs): + super(Operation, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.display = kwargs.get('display', None) + self.origin = kwargs.get('origin', None) + self.properties = kwargs.get('properties', None) class OperationDisplay(Model): @@ -1124,40 +1722,16 @@ class OperationDisplay(Model): 'description': {'key': 'description', 'type': 'str'}, } - def __init__(self, provider=None, operation=None, resource=None, description=None): - super(OperationDisplay, self).__init__() - self.provider = provider - self.operation = operation - self.resource = resource - self.description = description + def __init__(self, **kwargs): + super(OperationDisplay, self).__init__(**kwargs) + self.provider = kwargs.get('provider', None) + self.operation = kwargs.get('operation', None) + self.resource = kwargs.get('resource', None) + self.description = kwargs.get('description', None) -class PoolEndpointConfiguration(Model): - """The endpoint configuration for a pool. - - :param inbound_nat_pools: A list of inbound NAT pools that can be used to - address specific ports on an individual compute node externally. The - maximum number of inbound NAT pools per Batch pool is 5. If the maximum - number of inbound NAT pools is exceeded the request fails with HTTP status - code 400. 
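A minimal wiring sketch for the endpoint-configuration models above; it is not taken from the diff, and the port numbers and subnet ID are placeholders chosen to respect the documented reserved ranges.

from azure.mgmt.batch import models

allow_all = models.NetworkSecurityGroupRule(
    priority=150,                      # allowed priorities are 150 to 3500
    access='Allow',
    source_address_prefix='*',
)
app_nat_pool = models.InboundNatPool(
    name='app',
    protocol='TCP',
    backend_port=8080,                 # 22, 3389, 29876 and 29877 are reserved
    frontend_port_range_start=15000,   # 50000 to 55000 is reserved
    frontend_port_range_end=15100,
    network_security_group_rules=[allow_all],
)
network = models.NetworkConfiguration(
    subnet_id='<virtual-network-subnet-resource-id>',  # placeholder
    endpoint_configuration=models.PoolEndpointConfiguration(
        inbound_nat_pools=[app_nat_pool],
    ),
)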
- :type inbound_nat_pools: list[~azure.mgmt.batch.models.InboundNatPool] - """ - - _validation = { - 'inbound_nat_pools': {'required': True}, - } - - _attribute_map = { - 'inbound_nat_pools': {'key': 'inboundNatPools', 'type': '[InboundNatPool]'}, - } - - def __init__(self, inbound_nat_pools): - super(PoolEndpointConfiguration, self).__init__() - self.inbound_nat_pools = inbound_nat_pools - - -class ProxyResource(Model): - """A definition of an Azure resource. +class Pool(ProxyResource): + """Contains information about a pool. Variables are only populated by the server, and will be ignored when sending a request. @@ -1170,6 +1744,123 @@ class ProxyResource(Model): :vartype type: str :ivar etag: The ETag of the resource, used for concurrency statements. :vartype etag: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :ivar last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + autoScaleSettings, changed. It does not factor in node-level changes such + as a compute node changing state. + :vartype last_modified: datetime + :ivar creation_time: The creation time of the pool. + :vartype creation_time: datetime + :ivar provisioning_state: The current state of the pool. Possible values + include: 'Succeeded', 'Deleting' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.PoolProvisioningState + :ivar provisioning_state_transition_time: The time at which the pool + entered its current state. + :vartype provisioning_state_transition_time: datetime + :ivar allocation_state: Whether the pool is resizing. Possible values + include: 'Steady', 'Resizing', 'Stopping' + :vartype allocation_state: str or ~azure.mgmt.batch.models.AllocationState + :ivar allocation_state_transition_time: The time at which the pool entered + its current allocation state. + :vartype allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All VMs in a + pool are the same size. For information about available sizes of virtual + machines for Cloud Services pools (pools created with + cloudServiceConfiguration), see Sizes for Cloud Services + (http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall. For + information about available VM sizes for pools using images from the + Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param deployment_configuration: This property describes how the pool + nodes will be deployed - using Cloud Services or Virtual Machines. Using + CloudServiceConfiguration specifies that the nodes should be creating + using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses + Azure Virtual Machines (IaaS). + :type deployment_configuration: + ~azure.mgmt.batch.models.DeploymentConfiguration + :ivar current_dedicated_nodes: The number of compute nodes currently in + the pool. 
+ :vartype current_dedicated_nodes: int + :ivar current_low_priority_nodes: The number of low priority compute nodes + currently in the pool. + :vartype current_low_priority_nodes: int + :param scale_settings: Settings which configure the number of nodes in the + pool. + :type scale_settings: ~azure.mgmt.batch.models.ScaleSettings + :ivar auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. autoScaleSettings are used. + :vartype auto_scale_run: ~azure.mgmt.batch.models.AutoScaleRun + :param inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Enabling this value can reduce the chance of the + requested number of nodes to be allocated in the pool. If not specified, + this value defaults to 'Disabled'. Possible values include: 'Enabled', + 'Disabled' + :type inter_node_communication: str or + ~azure.mgmt.batch.models.InterNodeCommunicationState + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.mgmt.batch.models.NetworkConfiguration + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. + :type task_scheduling_policy: + ~azure.mgmt.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.mgmt.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.mgmt.batch.models.MetadataItem] + :param start_task: A task specified to run on each compute node as it + joins the pool. In an PATCH (update) operation, this property can be set + to an empty object to remove the start task from the pool. + :type start_task: ~azure.mgmt.batch.models.StartTask + :param certificates: The list of certificates to be installed on each + compute node in the pool. For Windows compute nodes, the Batch service + installs the certificates to the specified certificate store and location. + For Linux compute nodes, the certificates are stored in a directory inside + the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificates: list[~azure.mgmt.batch.models.CertificateReference] + :param application_packages: The list of application packages to be + installed on each compute node in the pool. Changes to application package + references affect all new compute nodes joining the pool, but do not + affect compute nodes that are already in the pool until they are rebooted + or reimaged. There is a maximum of 10 application package references on + any given pool. 
+ :type application_packages: + list[~azure.mgmt.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :ivar resize_operation_status: Contains details about the current or last + completed resize operation. + :vartype resize_operation_status: + ~azure.mgmt.batch.models.ResizeOperationStatus """ _validation = { @@ -1177,6 +1868,16 @@ class ProxyResource(Model): 'name': {'readonly': True}, 'type': {'readonly': True}, 'etag': {'readonly': True}, + 'last_modified': {'readonly': True}, + 'creation_time': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'provisioning_state_transition_time': {'readonly': True}, + 'allocation_state': {'readonly': True}, + 'allocation_state_transition_time': {'readonly': True}, + 'current_dedicated_nodes': {'readonly': True}, + 'current_low_priority_nodes': {'readonly': True}, + 'auto_scale_run': {'readonly': True}, + 'resize_operation_status': {'readonly': True}, } _attribute_map = { @@ -1184,24 +1885,96 @@ class ProxyResource(Model): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, + 'display_name': {'key': 'properties.displayName', 'type': 'str'}, + 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'PoolProvisioningState'}, + 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'properties.allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'properties.allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'properties.vmSize', 'type': 'str'}, + 'deployment_configuration': {'key': 'properties.deploymentConfiguration', 'type': 'DeploymentConfiguration'}, + 'current_dedicated_nodes': {'key': 'properties.currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'properties.currentLowPriorityNodes', 'type': 'int'}, + 'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'}, + 'auto_scale_run': {'key': 'properties.autoScaleRun', 'type': 'AutoScaleRun'}, + 'inter_node_communication': {'key': 'properties.interNodeCommunication', 'type': 'InterNodeCommunicationState'}, + 'network_configuration': {'key': 'properties.networkConfiguration', 'type': 'NetworkConfiguration'}, + 'max_tasks_per_node': {'key': 'properties.maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'properties.taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'properties.userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'properties.metadata', 'type': '[MetadataItem]'}, + 'start_task': {'key': 'properties.startTask', 'type': 'StartTask'}, + 'certificates': {'key': 'properties.certificates', 'type': '[CertificateReference]'}, + 'application_packages': {'key': 'properties.applicationPackages', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 'properties.applicationLicenses', 'type': '[str]'}, + 'resize_operation_status': {'key': 
'properties.resizeOperationStatus', 'type': 'ResizeOperationStatus'}, } - def __init__(self): - super(ProxyResource, self).__init__() - self.id = None - self.name = None - self.type = None - self.etag = None + def __init__(self, **kwargs): + super(Pool, self).__init__(**kwargs) + self.display_name = kwargs.get('display_name', None) + self.last_modified = None + self.creation_time = None + self.provisioning_state = None + self.provisioning_state_transition_time = None + self.allocation_state = None + self.allocation_state_transition_time = None + self.vm_size = kwargs.get('vm_size', None) + self.deployment_configuration = kwargs.get('deployment_configuration', None) + self.current_dedicated_nodes = None + self.current_low_priority_nodes = None + self.scale_settings = kwargs.get('scale_settings', None) + self.auto_scale_run = None + self.inter_node_communication = kwargs.get('inter_node_communication', None) + self.network_configuration = kwargs.get('network_configuration', None) + self.max_tasks_per_node = kwargs.get('max_tasks_per_node', None) + self.task_scheduling_policy = kwargs.get('task_scheduling_policy', None) + self.user_accounts = kwargs.get('user_accounts', None) + self.metadata = kwargs.get('metadata', None) + self.start_task = kwargs.get('start_task', None) + self.certificates = kwargs.get('certificates', None) + self.application_packages = kwargs.get('application_packages', None) + self.application_licenses = kwargs.get('application_licenses', None) + self.resize_operation_status = None + + +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. + + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.mgmt.batch.models.InboundNatPool] + """ + + _validation = { + 'inbound_nat_pools': {'required': True}, + } + + _attribute_map = { + 'inbound_nat_pools': {'key': 'inboundNatPools', 'type': '[InboundNatPool]'}, + } + + def __init__(self, **kwargs): + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = kwargs.get('inbound_nat_pools', None) class ResizeError(Model): """An error that occurred when resizing a pool. - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. :type message: str :param details: Additional details about the error. 
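A sketch of how the expanded Pool model above might be used end to end; the pool.create signature is assumed from earlier releases of this operations group and is not confirmed by the diff, and the VM size is only an example.

from azure.mgmt.batch import models


def create_fixed_size_pool(client, resource_group_name, account_name, pool_name):
    # 'client' is assumed to be a BatchManagementClient; the create() signature is
    # assumed from earlier versions of the pool operations group.
    parameters = models.Pool(
        vm_size='STANDARD_D2_V2',
        display_name=pool_name,
        deployment_configuration=models.DeploymentConfiguration(
            cloud_service_configuration=models.CloudServiceConfiguration(os_family='6'),
        ),
        scale_settings=models.ScaleSettings(
            fixed_scale=models.FixedScaleSettings(target_dedicated_nodes=2),
        ),
    )
    return client.pool.create(resource_group_name, account_name, pool_name, parameters)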
:type details: list[~azure.mgmt.batch.models.ResizeError] @@ -1218,11 +1991,11 @@ class ResizeError(Model): 'details': {'key': 'details', 'type': '[ResizeError]'}, } - def __init__(self, code, message, details=None): - super(ResizeError, self).__init__() - self.code = code - self.message = message - self.details = details + def __init__(self, **kwargs): + super(ResizeError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) + self.details = kwargs.get('details', None) class ResizeOperationStatus(Model): @@ -1267,57 +2040,14 @@ class ResizeOperationStatus(Model): 'errors': {'key': 'errors', 'type': '[ResizeError]'}, } - def __init__(self, target_dedicated_nodes=None, target_low_priority_nodes=None, resize_timeout=None, node_deallocation_option=None, start_time=None, errors=None): - super(ResizeOperationStatus, self).__init__() - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.resize_timeout = resize_timeout - self.node_deallocation_option = node_deallocation_option - self.start_time = start_time - self.errors = errors - - -class Resource(Model): - """A definition of an Azure resource. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar location: The location of the resource. - :vartype location: str - :ivar tags: The tags of the resource. - :vartype tags: dict[str, str] - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'location': {'readonly': True}, - 'tags': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self): - super(Resource, self).__init__() - self.id = None - self.name = None - self.type = None - self.location = None - self.tags = None + def __init__(self, **kwargs): + super(ResizeOperationStatus, self).__init__(**kwargs) + self.target_dedicated_nodes = kwargs.get('target_dedicated_nodes', None) + self.target_low_priority_nodes = kwargs.get('target_low_priority_nodes', None) + self.resize_timeout = kwargs.get('resize_timeout', None) + self.node_deallocation_option = kwargs.get('node_deallocation_option', None) + self.start_time = kwargs.get('start_time', None) + self.errors = kwargs.get('errors', None) class ResourceFile(Model): @@ -1333,10 +2063,10 @@ class ResourceFile(Model): httpUrl properties are mutually exclusive and one of them must be specified. This URL must be readable and listable using anonymous access; that is, the Batch service does not present any credentials when - downloading blobs from the container. There are two ways to get such a URL - for a container in Azure storage: include a Shared Access Signature (SAS) - granting read and list permissions on the container, or set the ACL for - the container to allow public access. + downloading the blob. There are two ways to get such a URL for a blob in + Azure storage: include a Shared Access Signature (SAS) granting read and + list permissions on the blob, or set the ACL for the blob or its container + to allow public access. 
:type storage_container_url: str :param http_url: The URL of the file to download. The autoStorageContainerName, storageContainerUrl and httpUrl properties are @@ -1383,14 +2113,14 @@ class ResourceFile(Model): 'file_mode': {'key': 'fileMode', 'type': 'str'}, } - def __init__(self, auto_storage_container_name=None, storage_container_url=None, http_url=None, blob_prefix=None, file_path=None, file_mode=None): - super(ResourceFile, self).__init__() - self.auto_storage_container_name = auto_storage_container_name - self.storage_container_url = storage_container_url - self.http_url = http_url - self.blob_prefix = blob_prefix - self.file_path = file_path - self.file_mode = file_mode + def __init__(self, **kwargs): + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) + self.storage_container_url = kwargs.get('storage_container_url', None) + self.http_url = kwargs.get('http_url', None) + self.blob_prefix = kwargs.get('blob_prefix', None) + self.file_path = kwargs.get('file_path', None) + self.file_mode = kwargs.get('file_mode', None) class ScaleSettings(Model): @@ -1417,16 +2147,23 @@ class ScaleSettings(Model): 'auto_scale': {'key': 'autoScale', 'type': 'AutoScaleSettings'}, } - def __init__(self, fixed_scale=None, auto_scale=None): - super(ScaleSettings, self).__init__() - self.fixed_scale = fixed_scale - self.auto_scale = auto_scale + def __init__(self, **kwargs): + super(ScaleSettings, self).__init__(**kwargs) + self.fixed_scale = kwargs.get('fixed_scale', None) + self.auto_scale = kwargs.get('auto_scale', None) class StartTask(Model): """A task which is run when a compute node joins a pool in the Azure Batch service, or when the compute node is rebooted or reimaged. + In some cases the start task may be re-run even though the node was not + rebooted. Due to this, start tasks should be idempotent and exit gracefully + if the setup they're performing has already been done. Special care should + be taken to avoid start tasks which create breakaway process or + install/launch services from the start task working directory, as this will + block Batch from being able to re-run the start task. + :param command_line: The command line of the start task. The command line does not run under a shell, and therefore cannot take advantage of shell features such as environment variable expansion. 
If you want to take @@ -1486,28 +2223,30 @@ class StartTask(Model): 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, } - def __init__(self, command_line=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None, container_settings=None): - super(StartTask, self).__init__() - self.command_line = command_line - self.resource_files = resource_files - self.environment_settings = environment_settings - self.user_identity = user_identity - self.max_task_retry_count = max_task_retry_count - self.wait_for_success = wait_for_success - self.container_settings = container_settings + def __init__(self, **kwargs): + super(StartTask, self).__init__(**kwargs) + self.command_line = kwargs.get('command_line', None) + self.resource_files = kwargs.get('resource_files', None) + self.environment_settings = kwargs.get('environment_settings', None) + self.user_identity = kwargs.get('user_identity', None) + self.max_task_retry_count = kwargs.get('max_task_retry_count', None) + self.wait_for_success = kwargs.get('wait_for_success', None) + self.container_settings = kwargs.get('container_settings', None) class TaskContainerSettings(Model): """The container settings for a task. + All required parameters must be populated in order to send to Azure. + :param container_run_options: Additional options to the container create command. These additional options are supplied as arguments to the "docker create" command, in addition to those controlled by the Batch Service. :type container_run_options: str - :param image_name: The image to use to create the container in which the - task will run. This is the full image reference, as would be specified to - "docker pull". If no tag is provided as part of the image name, the tag - ":latest" is used as a default. + :param image_name: Required. The image to use to create the container in + which the task will run. This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. :type image_name: str :param registry: The private registry which contains the container image. This setting can be omitted if was already provided at pool creation. @@ -1524,18 +2263,20 @@ class TaskContainerSettings(Model): 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, } - def __init__(self, image_name, container_run_options=None, registry=None): - super(TaskContainerSettings, self).__init__() - self.container_run_options = container_run_options - self.image_name = image_name - self.registry = registry + def __init__(self, **kwargs): + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = kwargs.get('container_run_options', None) + self.image_name = kwargs.get('image_name', None) + self.registry = kwargs.get('registry', None) class TaskSchedulingPolicy(Model): """Specifies how tasks should be distributed across compute nodes. - :param node_fill_type: How tasks should be distributed across compute - nodes. Possible values include: 'Spread', 'Pack' + All required parameters must be populated in order to send to Azure. + + :param node_fill_type: Required. How tasks should be distributed across + compute nodes. 
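An illustrative start task following the new idempotency guidance added to the StartTask docstring above; the script name and URL are placeholders, not values from the diff.

from azure.mgmt.batch import models

start_task = models.StartTask(
    # The script is expected to detect already-completed setup and exit 0,
    # so re-running it on the same node is harmless.
    command_line="/bin/bash -c 'bash setup.sh'",
    resource_files=[
        models.ResourceFile(
            http_url='<https-url-of-setup.sh>',  # placeholder URL
            file_path='setup.sh',
        ),
    ],
    wait_for_success=True,
    max_task_retry_count=1,
)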
Possible values include: 'Spread', 'Pack' :type node_fill_type: str or ~azure.mgmt.batch.models.ComputeNodeFillType """ @@ -1547,17 +2288,19 @@ class TaskSchedulingPolicy(Model): 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, } - def __init__(self, node_fill_type): - super(TaskSchedulingPolicy, self).__init__() - self.node_fill_type = node_fill_type + def __init__(self, **kwargs): + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = kwargs.get('node_fill_type', None) class UserAccount(Model): """Properties used to create a user on an Azure Batch node. - :param name: The name of the user account. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the user account. :type name: str - :param password: The password for the user account. + :param password: Required. The password for the user account. :type password: str :param elevation_level: The elevation level of the user account. nonAdmin - The auto user is a standard user without elevated access. admin - The @@ -1591,13 +2334,13 @@ class UserAccount(Model): 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, } - def __init__(self, name, password, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None): - super(UserAccount, self).__init__() - self.name = name - self.password = password - self.elevation_level = elevation_level - self.linux_user_configuration = linux_user_configuration - self.windows_user_configuration = windows_user_configuration + def __init__(self, **kwargs): + super(UserAccount, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.password = kwargs.get('password', None) + self.elevation_level = kwargs.get('elevation_level', None) + self.linux_user_configuration = kwargs.get('linux_user_configuration', None) + self.windows_user_configuration = kwargs.get('windows_user_configuration', None) class UserIdentity(Model): @@ -1620,20 +2363,22 @@ class UserIdentity(Model): 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, } - def __init__(self, user_name=None, auto_user=None): - super(UserIdentity, self).__init__() - self.user_name = user_name - self.auto_user = auto_user + def __init__(self, **kwargs): + super(UserIdentity, self).__init__(**kwargs) + self.user_name = kwargs.get('user_name', None) + self.auto_user = kwargs.get('auto_user', None) class VirtualMachineConfiguration(Model): """The configuration for compute nodes in a pool based on the Azure Virtual Machines infrastructure. - :param image_reference: A reference to the Azure Virtual Machines - Marketplace Image or the custom Virtual Machine Image to use. + All required parameters must be populated in order to send to Azure. + + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. :type image_reference: ~azure.mgmt.batch.models.ImageReference - :param node_agent_sku_id: The SKU of the Batch node agent to be + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be provisioned on compute nodes in the pool. The Batch node agent is a program that runs on each node in the pool, and provides the command-and-control interface between the node and the Batch service. 
@@ -1682,639 +2427,75 @@ class VirtualMachineConfiguration(Model): 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, } - def __init__(self, image_reference, node_agent_sku_id, windows_configuration=None, data_disks=None, license_type=None, container_configuration=None): - super(VirtualMachineConfiguration, self).__init__() - self.image_reference = image_reference - self.node_agent_sku_id = node_agent_sku_id - self.windows_configuration = windows_configuration - self.data_disks = data_disks - self.license_type = license_type - self.container_configuration = container_configuration - - -class WindowsConfiguration(Model): - """Windows operating system settings to apply to the virtual machine. - - :param enable_automatic_updates: Whether automatic updates are enabled on - the virtual machine. If omitted, the default value is true. - :type enable_automatic_updates: bool - """ - - _attribute_map = { - 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, - } - - def __init__(self, enable_automatic_updates=None): - super(WindowsConfiguration, self).__init__() - self.enable_automatic_updates = enable_automatic_updates - - -class WindowsUserConfiguration(Model): - """Properties used to create a user account on a Windows node. - - :param login_mode: Login mode for user. Specifies login mode for the user. - The default value for VirtualMachineConfiguration pools is interactive - mode and for CloudServiceConfiguration pools is batch mode. Possible - values include: 'Batch', 'Interactive' - :type login_mode: str or ~azure.mgmt.batch.models.LoginMode - """ - - _attribute_map = { - 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, - } - - def __init__(self, login_mode=None): - super(WindowsUserConfiguration, self).__init__() - self.login_mode = login_mode + def __init__(self, **kwargs): + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = kwargs.get('image_reference', None) + self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) + self.windows_configuration = kwargs.get('windows_configuration', None) + self.data_disks = kwargs.get('data_disks', None) + self.license_type = kwargs.get('license_type', None) + self.container_configuration = kwargs.get('container_configuration', None) -class Application(ProxyResource): - """Contains information about an application in a Batch account. +class VirtualMachineFamilyCoreQuota(Model): + """A VM Family and its associated core quota for the Batch account. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. + :ivar name: The Virtual Machine family name. :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param display_name: The display name for the application. - :type display_name: str - :param allow_updates: A value indicating whether packages within the - application may be overwritten using the same version string. - :type allow_updates: bool - :param default_version: The package to use if a client requests the - application but does not specify a version. This property can only be set - to the name of an existing package. - :type default_version: str + :ivar core_quota: The core quota for the VM family for the Batch account. 
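A minimal sketch of the virtual-machine deployment path using the models above; the marketplace image and node agent SKU are example values known to be valid around this API version, not values taken from the diff.

from azure.mgmt.batch import models

vm_config = models.VirtualMachineConfiguration(
    image_reference=models.ImageReference(
        publisher='Canonical',
        offer='UbuntuServer',
        sku='18.04-LTS',
        version='latest',
    ),
    node_agent_sku_id='batch.node.ubuntu 18.04',
    data_disks=[
        models.DataDisk(lun=0, disk_size_gb=64, caching='ReadWrite'),
    ],
)
deployment = models.DeploymentConfiguration(virtual_machine_configuration=vm_config)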
+ :vartype core_quota: int """ _validation = { - 'id': {'readonly': True}, 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, + 'core_quota': {'readonly': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'display_name': {'key': 'properties.displayName', 'type': 'str'}, - 'allow_updates': {'key': 'properties.allowUpdates', 'type': 'bool'}, - 'default_version': {'key': 'properties.defaultVersion', 'type': 'str'}, + 'core_quota': {'key': 'coreQuota', 'type': 'int'}, } - def __init__(self, display_name=None, allow_updates=None, default_version=None): - super(Application, self).__init__() - self.display_name = display_name - self.allow_updates = allow_updates - self.default_version = default_version - + def __init__(self, **kwargs): + super(VirtualMachineFamilyCoreQuota, self).__init__(**kwargs) + self.name = None + self.core_quota = None -class ApplicationPackage(ProxyResource): - """An application package which represents a particular version of an - application. - Variables are only populated by the server, and will be ignored when - sending a request. +class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :ivar state: The current state of the application package. Possible values - include: 'Pending', 'Active' - :vartype state: str or ~azure.mgmt.batch.models.PackageState - :ivar format: The format of the application package, if the package is - active. - :vartype format: str - :ivar storage_url: The URL for the application package in Azure Storage. - :vartype storage_url: str - :ivar storage_url_expiry: The UTC time at which the Azure Storage URL will - expire. - :vartype storage_url_expiry: datetime - :ivar last_activation_time: The time at which the package was last - activated, if the package is active. - :vartype last_activation_time: datetime + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. 
+ :type enable_automatic_updates: bool """ - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'state': {'readonly': True}, - 'format': {'readonly': True}, - 'storage_url': {'readonly': True}, - 'storage_url_expiry': {'readonly': True}, - 'last_activation_time': {'readonly': True}, - } - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'state': {'key': 'properties.state', 'type': 'PackageState'}, - 'format': {'key': 'properties.format', 'type': 'str'}, - 'storage_url': {'key': 'properties.storageUrl', 'type': 'str'}, - 'storage_url_expiry': {'key': 'properties.storageUrlExpiry', 'type': 'iso-8601'}, - 'last_activation_time': {'key': 'properties.lastActivationTime', 'type': 'iso-8601'}, + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, } - def __init__(self): - super(ApplicationPackage, self).__init__() - self.state = None - self.format = None - self.storage_url = None - self.storage_url_expiry = None - self.last_activation_time = None + def __init__(self, **kwargs): + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = kwargs.get('enable_automatic_updates', None) -class AutoStorageProperties(AutoStorageBaseProperties): - """Contains information about the auto-storage account associated with a Batch - account. +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. - :param storage_account_id: The resource ID of the storage account to be - used for auto-storage account. - :type storage_account_id: str - :param last_key_sync: The UTC time at which storage keys were last - synchronized with the Batch account. - :type last_key_sync: datetime + :param login_mode: Login mode for user. Specifies login mode for the user. + The default value for VirtualMachineConfiguration pools is interactive + mode and for CloudServiceConfiguration pools is batch mode. Possible + values include: 'Batch', 'Interactive' + :type login_mode: str or ~azure.mgmt.batch.models.LoginMode """ - _validation = { - 'storage_account_id': {'required': True}, - 'last_key_sync': {'required': True}, - } - _attribute_map = { - 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, - 'last_key_sync': {'key': 'lastKeySync', 'type': 'iso-8601'}, + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, } - def __init__(self, storage_account_id, last_key_sync): - super(AutoStorageProperties, self).__init__(storage_account_id=storage_account_id) - self.last_key_sync = last_key_sync - - -class BatchAccount(Resource): - """Contains information about an Azure Batch account. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar location: The location of the resource. - :vartype location: str - :ivar tags: The tags of the resource. - :vartype tags: dict[str, str] - :ivar account_endpoint: The account endpoint used to interact with the - Batch service. - :vartype account_endpoint: str - :ivar provisioning_state: The provisioned state of the resource. 
Possible - values include: 'Invalid', 'Creating', 'Deleting', 'Succeeded', 'Failed', - 'Cancelled' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.ProvisioningState - :ivar pool_allocation_mode: The allocation mode to use for creating pools - in the Batch account. Possible values include: 'BatchService', - 'UserSubscription' - :vartype pool_allocation_mode: str or - ~azure.mgmt.batch.models.PoolAllocationMode - :ivar key_vault_reference: A reference to the Azure key vault associated - with the Batch account. - :vartype key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference - :ivar auto_storage: The properties and status of any auto-storage account - associated with the Batch account. - :vartype auto_storage: ~azure.mgmt.batch.models.AutoStorageProperties - :ivar dedicated_core_quota: The dedicated core quota for this Batch - account. - :vartype dedicated_core_quota: int - :ivar low_priority_core_quota: The low-priority core quota for this Batch - account. - :vartype low_priority_core_quota: int - :ivar pool_quota: The pool quota for this Batch account. - :vartype pool_quota: int - :ivar active_job_and_job_schedule_quota: The active job and job schedule - quota for this Batch account. - :vartype active_job_and_job_schedule_quota: int - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'location': {'readonly': True}, - 'tags': {'readonly': True}, - 'account_endpoint': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'pool_allocation_mode': {'readonly': True}, - 'key_vault_reference': {'readonly': True}, - 'auto_storage': {'readonly': True}, - 'dedicated_core_quota': {'readonly': True}, - 'low_priority_core_quota': {'readonly': True}, - 'pool_quota': {'readonly': True}, - 'active_job_and_job_schedule_quota': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'account_endpoint': {'key': 'properties.accountEndpoint', 'type': 'str'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'}, - 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, - 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, - 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageProperties'}, - 'dedicated_core_quota': {'key': 'properties.dedicatedCoreQuota', 'type': 'int'}, - 'low_priority_core_quota': {'key': 'properties.lowPriorityCoreQuota', 'type': 'int'}, - 'pool_quota': {'key': 'properties.poolQuota', 'type': 'int'}, - 'active_job_and_job_schedule_quota': {'key': 'properties.activeJobAndJobScheduleQuota', 'type': 'int'}, - } - - def __init__(self): - super(BatchAccount, self).__init__() - self.account_endpoint = None - self.provisioning_state = None - self.pool_allocation_mode = None - self.key_vault_reference = None - self.auto_storage = None - self.dedicated_core_quota = None - self.low_priority_core_quota = None - self.pool_quota = None - self.active_job_and_job_schedule_quota = None - - -class Certificate(ProxyResource): - """Contains information about a certificate. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. 
- :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param thumbprint_algorithm: The algorithm of the certificate thumbprint. - This must match the first portion of the certificate name. Currently - required to be 'SHA1'. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the certificate. This must match the - thumbprint from the name. - :type thumbprint: str - :param format: The format of the certificate - either Pfx or Cer. If - omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' - :type format: str or ~azure.mgmt.batch.models.CertificateFormat - :ivar provisioning_state: The provisioned state of the resource. Possible - values include: 'Succeeded', 'Deleting', 'Failed' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.CertificateProvisioningState - :ivar provisioning_state_transition_time: The time at which the - certificate entered its current state. - :vartype provisioning_state_transition_time: datetime - :ivar previous_provisioning_state: The previous provisioned state of the - resource. Possible values include: 'Succeeded', 'Deleting', 'Failed' - :vartype previous_provisioning_state: str or - ~azure.mgmt.batch.models.CertificateProvisioningState - :ivar previous_provisioning_state_transition_time: The time at which the - certificate entered its previous state. - :vartype previous_provisioning_state_transition_time: datetime - :ivar public_data: The public key of the certificate. - :vartype public_data: str - :ivar delete_certificate_error: The error which occurred while deleting - the certificate. This is only returned when the certificate - provisioningState is 'Failed'. 
- :vartype delete_certificate_error: - ~azure.mgmt.batch.models.DeleteCertificateError - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'provisioning_state_transition_time': {'readonly': True}, - 'previous_provisioning_state': {'readonly': True}, - 'previous_provisioning_state_transition_time': {'readonly': True}, - 'public_data': {'readonly': True}, - 'delete_certificate_error': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, - 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, - 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'CertificateProvisioningState'}, - 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, - 'previous_provisioning_state': {'key': 'properties.previousProvisioningState', 'type': 'CertificateProvisioningState'}, - 'previous_provisioning_state_transition_time': {'key': 'properties.previousProvisioningStateTransitionTime', 'type': 'iso-8601'}, - 'public_data': {'key': 'properties.publicData', 'type': 'str'}, - 'delete_certificate_error': {'key': 'properties.deleteCertificateError', 'type': 'DeleteCertificateError'}, - } - - def __init__(self, thumbprint_algorithm=None, thumbprint=None, format=None): - super(Certificate, self).__init__() - self.thumbprint_algorithm = thumbprint_algorithm - self.thumbprint = thumbprint - self.format = format - self.provisioning_state = None - self.provisioning_state_transition_time = None - self.previous_provisioning_state = None - self.previous_provisioning_state_transition_time = None - self.public_data = None - self.delete_certificate_error = None - - -class CertificateCreateOrUpdateParameters(ProxyResource): - """Contains information about a certificate. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param thumbprint_algorithm: The algorithm of the certificate thumbprint. - This must match the first portion of the certificate name. Currently - required to be 'SHA1'. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the certificate. This must match the - thumbprint from the name. - :type thumbprint: str - :param format: The format of the certificate - either Pfx or Cer. If - omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' - :type format: str or ~azure.mgmt.batch.models.CertificateFormat - :param data: The base64-encoded contents of the certificate. The maximum - size is 10KB. - :type data: str - :param password: The password to access the certificate's private key. - This is required if the certificate format is pfx and must be omitted if - the certificate format is cer. 
- :type password: str - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'data': {'required': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, - 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, - 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, - 'data': {'key': 'properties.data', 'type': 'str'}, - 'password': {'key': 'properties.password', 'type': 'str'}, - } - - def __init__(self, data, thumbprint_algorithm=None, thumbprint=None, format=None, password=None): - super(CertificateCreateOrUpdateParameters, self).__init__() - self.thumbprint_algorithm = thumbprint_algorithm - self.thumbprint = thumbprint - self.format = format - self.data = data - self.password = password - - -class Pool(ProxyResource): - """Contains information about a pool. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param display_name: The display name for the pool. The display name need - not be unique and can contain any Unicode characters up to a maximum - length of 1024. - :type display_name: str - :ivar last_modified: The last modified time of the pool. This is the last - time at which the pool level data, such as the targetDedicatedNodes or - autoScaleSettings, changed. It does not factor in node-level changes such - as a compute node changing state. - :vartype last_modified: datetime - :ivar creation_time: The creation time of the pool. - :vartype creation_time: datetime - :ivar provisioning_state: The current state of the pool. Possible values - include: 'Succeeded', 'Deleting' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.PoolProvisioningState - :ivar provisioning_state_transition_time: The time at which the pool - entered its current state. - :vartype provisioning_state_transition_time: datetime - :ivar allocation_state: Whether the pool is resizing. Possible values - include: 'Steady', 'Resizing', 'Stopping' - :vartype allocation_state: str or ~azure.mgmt.batch.models.AllocationState - :ivar allocation_state_transition_time: The time at which the pool entered - its current allocation state. - :vartype allocation_state_transition_time: datetime - :param vm_size: The size of virtual machines in the pool. All VMs in a - pool are the same size. For information about available sizes of virtual - machines for Cloud Services pools (pools created with - cloudServiceConfiguration), see Sizes for Cloud Services - (http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). - Batch supports all Cloud Services VM sizes except ExtraSmall. 
For - information about available VM sizes for pools using images from the - Virtual Machines Marketplace (pools created with - virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with - premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). - :type vm_size: str - :param deployment_configuration: This property describes how the pool - nodes will be deployed - using Cloud Services or Virtual Machines. Using - CloudServiceConfiguration specifies that the nodes should be creating - using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses - Azure Virtual Machines (IaaS). - :type deployment_configuration: - ~azure.mgmt.batch.models.DeploymentConfiguration - :ivar current_dedicated_nodes: The number of compute nodes currently in - the pool. - :vartype current_dedicated_nodes: int - :ivar current_low_priority_nodes: The number of low priority compute nodes - currently in the pool. - :vartype current_low_priority_nodes: int - :param scale_settings: Settings which configure the number of nodes in the - pool. - :type scale_settings: ~azure.mgmt.batch.models.ScaleSettings - :ivar auto_scale_run: The results and errors from the last execution of - the autoscale formula. This property is set only if the pool automatically - scales, i.e. autoScaleSettings are used. - :vartype auto_scale_run: ~azure.mgmt.batch.models.AutoScaleRun - :param inter_node_communication: Whether the pool permits direct - communication between nodes. This imposes restrictions on which nodes can - be assigned to the pool. Enabling this value can reduce the chance of the - requested number of nodes to be allocated in the pool. If not specified, - this value defaults to 'Disabled'. Possible values include: 'Enabled', - 'Disabled' - :type inter_node_communication: str or - ~azure.mgmt.batch.models.InterNodeCommunicationState - :param network_configuration: The network configuration for the pool. - :type network_configuration: ~azure.mgmt.batch.models.NetworkConfiguration - :param max_tasks_per_node: The maximum number of tasks that can run - concurrently on a single compute node in the pool. - :type max_tasks_per_node: int - :param task_scheduling_policy: How tasks are distributed across compute - nodes in a pool. - :type task_scheduling_policy: - ~azure.mgmt.batch.models.TaskSchedulingPolicy - :param user_accounts: The list of user accounts to be created on each node - in the pool. - :type user_accounts: list[~azure.mgmt.batch.models.UserAccount] - :param metadata: A list of name-value pairs associated with the pool as - metadata. The Batch service does not assign any meaning to metadata; it is - solely for the use of user code. - :type metadata: list[~azure.mgmt.batch.models.MetadataItem] - :param start_task: A task specified to run on each compute node as it - joins the pool. In an PATCH (update) operation, this property can be set - to an empty object to remove the start task from the pool. - :type start_task: ~azure.mgmt.batch.models.StartTask - :param certificates: The list of certificates to be installed on each - compute node in the pool. For Windows compute nodes, the Batch service - installs the certificates to the specified certificate store and location. 
- For Linux compute nodes, the certificates are stored in a directory inside - the task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this - location. For certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and certificates are placed in that directory. - :type certificates: list[~azure.mgmt.batch.models.CertificateReference] - :param application_packages: The list of application packages to be - installed on each compute node in the pool. Changes to application - packages affect all new compute nodes joining the pool, but do not affect - compute nodes that are already in the pool until they are rebooted or - reimaged. - :type application_packages: - list[~azure.mgmt.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses the Batch - service will make available on each compute node in the pool. The list of - application licenses must be a subset of available Batch service - application licenses. If a license is requested which is not supported, - pool creation will fail. - :type application_licenses: list[str] - :ivar resize_operation_status: Contains details about the current or last - completed resize operation. - :vartype resize_operation_status: - ~azure.mgmt.batch.models.ResizeOperationStatus - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'last_modified': {'readonly': True}, - 'creation_time': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'provisioning_state_transition_time': {'readonly': True}, - 'allocation_state': {'readonly': True}, - 'allocation_state_transition_time': {'readonly': True}, - 'current_dedicated_nodes': {'readonly': True}, - 'current_low_priority_nodes': {'readonly': True}, - 'auto_scale_run': {'readonly': True}, - 'resize_operation_status': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'display_name': {'key': 'properties.displayName', 'type': 'str'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'PoolProvisioningState'}, - 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, - 'allocation_state': {'key': 'properties.allocationState', 'type': 'AllocationState'}, - 'allocation_state_transition_time': {'key': 'properties.allocationStateTransitionTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'properties.vmSize', 'type': 'str'}, - 'deployment_configuration': {'key': 'properties.deploymentConfiguration', 'type': 'DeploymentConfiguration'}, - 'current_dedicated_nodes': {'key': 'properties.currentDedicatedNodes', 'type': 'int'}, - 'current_low_priority_nodes': {'key': 'properties.currentLowPriorityNodes', 'type': 'int'}, - 'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'}, - 'auto_scale_run': {'key': 'properties.autoScaleRun', 'type': 'AutoScaleRun'}, - 'inter_node_communication': {'key': 'properties.interNodeCommunication', 'type': 'InterNodeCommunicationState'}, - 'network_configuration': {'key': 
'properties.networkConfiguration', 'type': 'NetworkConfiguration'}, - 'max_tasks_per_node': {'key': 'properties.maxTasksPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'properties.taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'properties.userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'properties.metadata', 'type': '[MetadataItem]'}, - 'start_task': {'key': 'properties.startTask', 'type': 'StartTask'}, - 'certificates': {'key': 'properties.certificates', 'type': '[CertificateReference]'}, - 'application_packages': {'key': 'properties.applicationPackages', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'properties.applicationLicenses', 'type': '[str]'}, - 'resize_operation_status': {'key': 'properties.resizeOperationStatus', 'type': 'ResizeOperationStatus'}, - } - - def __init__(self, display_name=None, vm_size=None, deployment_configuration=None, scale_settings=None, inter_node_communication=None, network_configuration=None, max_tasks_per_node=None, task_scheduling_policy=None, user_accounts=None, metadata=None, start_task=None, certificates=None, application_packages=None, application_licenses=None): - super(Pool, self).__init__() - self.display_name = display_name - self.last_modified = None - self.creation_time = None - self.provisioning_state = None - self.provisioning_state_transition_time = None - self.allocation_state = None - self.allocation_state_transition_time = None - self.vm_size = vm_size - self.deployment_configuration = deployment_configuration - self.current_dedicated_nodes = None - self.current_low_priority_nodes = None - self.scale_settings = scale_settings - self.auto_scale_run = None - self.inter_node_communication = inter_node_communication - self.network_configuration = network_configuration - self.max_tasks_per_node = max_tasks_per_node - self.task_scheduling_policy = task_scheduling_policy - self.user_accounts = user_accounts - self.metadata = metadata - self.start_task = start_task - self.certificates = certificates - self.application_packages = application_packages - self.application_licenses = application_licenses - self.resize_operation_status = None + def __init__(self, **kwargs): + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = kwargs.get('login_mode', None) diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models_py3.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models_py3.py index c64ee92d4c21..edb71960c6fb 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models_py3.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_models_py3.py @@ -16,7 +16,10 @@ class ActivateApplicationPackageParameters(Model): """Parameters for an activating an application package. - :param format: The format of the application package binary file. + All required parameters must be populated in order to send to Azure. + + :param format: Required. The format of the application package binary + file. :type format: str """ @@ -28,17 +31,170 @@ class ActivateApplicationPackageParameters(Model): 'format': {'key': 'format', 'type': 'str'}, } - def __init__(self, format): - super(ActivateApplicationPackageParameters, self).__init__() + def __init__(self, *, format: str, **kwargs) -> None: + super(ActivateApplicationPackageParameters, self).__init__(**kwargs) self.format = format +class ProxyResource(Model): + """A definition of an Azure resource. 
+ + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(ProxyResource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.etag = None + + +class Application(ProxyResource): + """Contains information about an application in a Batch account. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param display_name: The display name for the application. + :type display_name: str + :param allow_updates: A value indicating whether packages within the + application may be overwritten using the same version string. + :type allow_updates: bool + :param default_version: The package to use if a client requests the + application but does not specify a version. This property can only be set + to the name of an existing package. + :type default_version: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'display_name': {'key': 'properties.displayName', 'type': 'str'}, + 'allow_updates': {'key': 'properties.allowUpdates', 'type': 'bool'}, + 'default_version': {'key': 'properties.defaultVersion', 'type': 'str'}, + } + + def __init__(self, *, display_name: str=None, allow_updates: bool=None, default_version: str=None, **kwargs) -> None: + super(Application, self).__init__(**kwargs) + self.display_name = display_name + self.allow_updates = allow_updates + self.default_version = default_version + + +class ApplicationPackage(ProxyResource): + """An application package which represents a particular version of an + application. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :ivar state: The current state of the application package. Possible values + include: 'Pending', 'Active' + :vartype state: str or ~azure.mgmt.batch.models.PackageState + :ivar format: The format of the application package, if the package is + active. + :vartype format: str + :ivar storage_url: The URL for the application package in Azure Storage. 
+ :vartype storage_url: str + :ivar storage_url_expiry: The UTC time at which the Azure Storage URL will + expire. + :vartype storage_url_expiry: datetime + :ivar last_activation_time: The time at which the package was last + activated, if the package is active. + :vartype last_activation_time: datetime + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'state': {'readonly': True}, + 'format': {'readonly': True}, + 'storage_url': {'readonly': True}, + 'storage_url_expiry': {'readonly': True}, + 'last_activation_time': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'state': {'key': 'properties.state', 'type': 'PackageState'}, + 'format': {'key': 'properties.format', 'type': 'str'}, + 'storage_url': {'key': 'properties.storageUrl', 'type': 'str'}, + 'storage_url_expiry': {'key': 'properties.storageUrlExpiry', 'type': 'iso-8601'}, + 'last_activation_time': {'key': 'properties.lastActivationTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs) -> None: + super(ApplicationPackage, self).__init__(**kwargs) + self.state = None + self.format = None + self.storage_url = None + self.storage_url_expiry = None + self.last_activation_time = None + + class ApplicationPackageReference(Model): """Link to an application package inside the batch account. - :param id: The ID of the application package to install. This must be - inside the same batch account as the pool. This can either be a reference - to a specific version or the default version if one exists. + All required parameters must be populated in order to send to Azure. + + :param id: Required. The ID of the application package to install. This + must be inside the same batch account as the pool. This can either be a + reference to a specific version or the default version if one exists. :type id: str :param version: The version of the application to deploy. If omitted, the default version is deployed. If this is omitted, and no default version is @@ -57,8 +213,8 @@ class ApplicationPackageReference(Model): 'version': {'key': 'version', 'type': 'str'}, } - def __init__(self, id, version=None): - super(ApplicationPackageReference, self).__init__() + def __init__(self, *, id: str, version: str=None, **kwargs) -> None: + super(ApplicationPackageReference, self).__init__(**kwargs) self.id = id self.version = version @@ -66,8 +222,10 @@ def __init__(self, id, version=None): class AutoScaleRun(Model): """The results and errors from an execution of a pool autoscale formula. - :param evaluation_time: The time at which the autoscale formula was last - evaluated. + All required parameters must be populated in order to send to Azure. + + :param evaluation_time: Required. The time at which the autoscale formula + was last evaluated. :type evaluation_time: datetime :param results: The final values of all variables used in the evaluation of the autoscale formula. 
Each variable value is returned in the form @@ -88,8 +246,8 @@ class AutoScaleRun(Model): 'error': {'key': 'error', 'type': 'AutoScaleRunError'}, } - def __init__(self, evaluation_time, results=None, error=None): - super(AutoScaleRun, self).__init__() + def __init__(self, *, evaluation_time, results: str=None, error=None, **kwargs) -> None: + super(AutoScaleRun, self).__init__(**kwargs) self.evaluation_time = evaluation_time self.results = results self.error = error @@ -98,11 +256,13 @@ def __init__(self, evaluation_time, results=None, error=None): class AutoScaleRunError(Model): """An error that occurred when autoscaling a pool. - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. :type message: str :param details: Additional details about the error. :type details: list[~azure.mgmt.batch.models.AutoScaleRunError] @@ -119,8 +279,8 @@ class AutoScaleRunError(Model): 'details': {'key': 'details', 'type': '[AutoScaleRunError]'}, } - def __init__(self, code, message, details=None): - super(AutoScaleRunError, self).__init__() + def __init__(self, *, code: str, message: str, details=None, **kwargs) -> None: + super(AutoScaleRunError, self).__init__(**kwargs) self.code = code self.message = message self.details = details @@ -129,8 +289,10 @@ def __init__(self, code, message, details=None): class AutoScaleSettings(Model): """AutoScale settings for the pool. - :param formula: A formula for the desired number of compute nodes in the - pool. + All required parameters must be populated in order to send to Azure. + + :param formula: Required. A formula for the desired number of compute + nodes in the pool. :type formula: str :param evaluation_interval: The time interval at which to automatically adjust the pool size according to the autoscale formula. If omitted, the @@ -147,8 +309,8 @@ class AutoScaleSettings(Model): 'evaluation_interval': {'key': 'evaluationInterval', 'type': 'duration'}, } - def __init__(self, formula, evaluation_interval=None): - super(AutoScaleSettings, self).__init__() + def __init__(self, *, formula: str, evaluation_interval=None, **kwargs) -> None: + super(AutoScaleSettings, self).__init__(**kwargs) self.formula = formula self.evaluation_interval = evaluation_interval @@ -156,8 +318,10 @@ def __init__(self, formula, evaluation_interval=None): class AutoStorageBaseProperties(Model): """The properties related to the auto-storage account. - :param storage_account_id: The resource ID of the storage account to be - used for auto-storage account. + All required parameters must be populated in order to send to Azure. + + :param storage_account_id: Required. The resource ID of the storage + account to be used for auto-storage account. 
:type storage_account_id: str """ @@ -169,11 +333,40 @@ class AutoStorageBaseProperties(Model): 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, } - def __init__(self, storage_account_id): - super(AutoStorageBaseProperties, self).__init__() + def __init__(self, *, storage_account_id: str, **kwargs) -> None: + super(AutoStorageBaseProperties, self).__init__(**kwargs) self.storage_account_id = storage_account_id +class AutoStorageProperties(AutoStorageBaseProperties): + """Contains information about the auto-storage account associated with a Batch + account. + + All required parameters must be populated in order to send to Azure. + + :param storage_account_id: Required. The resource ID of the storage + account to be used for auto-storage account. + :type storage_account_id: str + :param last_key_sync: Required. The UTC time at which storage keys were + last synchronized with the Batch account. + :type last_key_sync: datetime + """ + + _validation = { + 'storage_account_id': {'required': True}, + 'last_key_sync': {'required': True}, + } + + _attribute_map = { + 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, + 'last_key_sync': {'key': 'lastKeySync', 'type': 'iso-8601'}, + } + + def __init__(self, *, storage_account_id: str, last_key_sync, **kwargs) -> None: + super(AutoStorageProperties, self).__init__(storage_account_id=storage_account_id, **kwargs) + self.last_key_sync = last_key_sync + + class AutoUserSpecification(Model): """Specifies the parameters for the auto user that runs a task on the Batch service. @@ -194,84 +387,249 @@ class AutoUserSpecification(Model): 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, } - def __init__(self, scope=None, elevation_level=None): - super(AutoUserSpecification, self).__init__() + def __init__(self, *, scope=None, elevation_level=None, **kwargs) -> None: + super(AutoUserSpecification, self).__init__(**kwargs) self.scope = scope self.elevation_level = elevation_level -class BatchAccountCreateParameters(Model): - """Parameters supplied to the Create operation. +class Resource(Model): + """A definition of an Azure resource. - :param location: The region in which to create the account. - :type location: str - :param tags: The user-specified tags associated with the account. - :type tags: dict[str, str] - :param auto_storage: The properties related to the auto-storage account. - :type auto_storage: ~azure.mgmt.batch.models.AutoStorageBaseProperties - :param pool_allocation_mode: The allocation mode to use for creating pools - in the Batch account. The pool allocation mode also affects how clients - may authenticate to the Batch Service API. If the mode is BatchService, - clients may authenticate using access keys or Azure Active Directory. If - the mode is UserSubscription, clients must use Azure Active Directory. The - default is BatchService. Possible values include: 'BatchService', - 'UserSubscription' - :type pool_allocation_mode: str or - ~azure.mgmt.batch.models.PoolAllocationMode - :param key_vault_reference: A reference to the Azure key vault associated - with the Batch account. - :type key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar location: The location of the resource. 
+ :vartype location: str + :ivar tags: The tags of the resource. + :vartype tags: dict[str, str] """ _validation = { - 'location': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'tags': {'readonly': True}, } _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, - 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, - 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, - 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, } - def __init__(self, location, tags=None, auto_storage=None, pool_allocation_mode=None, key_vault_reference=None): - super(BatchAccountCreateParameters, self).__init__() - self.location = location - self.tags = tags - self.auto_storage = auto_storage - self.pool_allocation_mode = pool_allocation_mode - self.key_vault_reference = key_vault_reference + def __init__(self, **kwargs) -> None: + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None + self.location = None + self.tags = None -class BatchAccountKeys(Model): - """A set of Azure Batch account keys. +class BatchAccount(Resource): + """Contains information about an Azure Batch account. Variables are only populated by the server, and will be ignored when sending a request. - :ivar account_name: The Batch account name. - :vartype account_name: str - :ivar primary: The primary key associated with the account. - :vartype primary: str - :ivar secondary: The secondary key associated with the account. - :vartype secondary: str - """ - - _validation = { - 'account_name': {'readonly': True}, - 'primary': {'readonly': True}, - 'secondary': {'readonly': True}, - } - - _attribute_map = { - 'account_name': {'key': 'accountName', 'type': 'str'}, - 'primary': {'key': 'primary', 'type': 'str'}, + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar location: The location of the resource. + :vartype location: str + :ivar tags: The tags of the resource. + :vartype tags: dict[str, str] + :ivar account_endpoint: The account endpoint used to interact with the + Batch service. + :vartype account_endpoint: str + :ivar provisioning_state: The provisioned state of the resource. Possible + values include: 'Invalid', 'Creating', 'Deleting', 'Succeeded', 'Failed', + 'Cancelled' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.ProvisioningState + :ivar pool_allocation_mode: The allocation mode to use for creating pools + in the Batch account. Possible values include: 'BatchService', + 'UserSubscription' + :vartype pool_allocation_mode: str or + ~azure.mgmt.batch.models.PoolAllocationMode + :ivar key_vault_reference: A reference to the Azure key vault associated + with the Batch account. + :vartype key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + :ivar auto_storage: The properties and status of any auto-storage account + associated with the Batch account. + :vartype auto_storage: ~azure.mgmt.batch.models.AutoStorageProperties + :ivar dedicated_core_quota: The dedicated core quota for the Batch + account. 
For accounts with PoolAllocationMode set to UserSubscription, + quota is managed on the subscription so this value is not returned. + :vartype dedicated_core_quota: int + :ivar low_priority_core_quota: The low-priority core quota for the Batch + account. For accounts with PoolAllocationMode set to UserSubscription, + quota is managed on the subscription so this value is not returned. + :vartype low_priority_core_quota: int + :ivar dedicated_core_quota_per_vm_family: A list of the dedicated core + quota per Virtual Machine family for the Batch account. For accounts with + PoolAllocationMode set to UserSubscription, quota is managed on the + subscription so this value is not returned. + :vartype dedicated_core_quota_per_vm_family: + list[~azure.mgmt.batch.models.VirtualMachineFamilyCoreQuota] + :ivar dedicated_core_quota_per_vm_family_enforced: A value indicating + whether the core quota for the Batch Account is enforced per Virtual + Machine family or not. Batch is transitioning its core quota system for + dedicated cores to be enforced per Virtual Machine family. During this + transitional phase, the dedicated core quota per Virtual Machine family + may not yet be enforced. If this flag is false, dedicated core quota is + enforced via the old dedicatedCoreQuota property on the account and does + not consider Virtual Machine family. If this flag is true, dedicated core + quota is enforced via the dedicatedCoreQuotaPerVMFamily property on the + account, and the old dedicatedCoreQuota does not apply. + :vartype dedicated_core_quota_per_vm_family_enforced: bool + :ivar pool_quota: The pool quota for the Batch account. + :vartype pool_quota: int + :ivar active_job_and_job_schedule_quota: The active job and job schedule + quota for the Batch account. + :vartype active_job_and_job_schedule_quota: int + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'location': {'readonly': True}, + 'tags': {'readonly': True}, + 'account_endpoint': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'pool_allocation_mode': {'readonly': True}, + 'key_vault_reference': {'readonly': True}, + 'auto_storage': {'readonly': True}, + 'dedicated_core_quota': {'readonly': True}, + 'low_priority_core_quota': {'readonly': True}, + 'dedicated_core_quota_per_vm_family': {'readonly': True}, + 'dedicated_core_quota_per_vm_family_enforced': {'readonly': True}, + 'pool_quota': {'readonly': True}, + 'active_job_and_job_schedule_quota': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'account_endpoint': {'key': 'properties.accountEndpoint', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'}, + 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, + 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, + 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageProperties'}, + 'dedicated_core_quota': {'key': 'properties.dedicatedCoreQuota', 'type': 'int'}, + 'low_priority_core_quota': {'key': 'properties.lowPriorityCoreQuota', 'type': 'int'}, + 'dedicated_core_quota_per_vm_family': {'key': 'properties.dedicatedCoreQuotaPerVMFamily', 'type': '[VirtualMachineFamilyCoreQuota]'}, + 
'dedicated_core_quota_per_vm_family_enforced': {'key': 'properties.dedicatedCoreQuotaPerVMFamilyEnforced', 'type': 'bool'}, + 'pool_quota': {'key': 'properties.poolQuota', 'type': 'int'}, + 'active_job_and_job_schedule_quota': {'key': 'properties.activeJobAndJobScheduleQuota', 'type': 'int'}, + } + + def __init__(self, **kwargs) -> None: + super(BatchAccount, self).__init__(**kwargs) + self.account_endpoint = None + self.provisioning_state = None + self.pool_allocation_mode = None + self.key_vault_reference = None + self.auto_storage = None + self.dedicated_core_quota = None + self.low_priority_core_quota = None + self.dedicated_core_quota_per_vm_family = None + self.dedicated_core_quota_per_vm_family_enforced = None + self.pool_quota = None + self.active_job_and_job_schedule_quota = None + + +class BatchAccountCreateParameters(Model): + """Parameters supplied to the Create operation. + + All required parameters must be populated in order to send to Azure. + + :param location: Required. The region in which to create the account. + :type location: str + :param tags: The user-specified tags associated with the account. + :type tags: dict[str, str] + :param auto_storage: The properties related to the auto-storage account. + :type auto_storage: ~azure.mgmt.batch.models.AutoStorageBaseProperties + :param pool_allocation_mode: The allocation mode to use for creating pools + in the Batch account. The pool allocation mode also affects how clients + may authenticate to the Batch Service API. If the mode is BatchService, + clients may authenticate using access keys or Azure Active Directory. If + the mode is UserSubscription, clients must use Azure Active Directory. The + default is BatchService. Possible values include: 'BatchService', + 'UserSubscription' + :type pool_allocation_mode: str or + ~azure.mgmt.batch.models.PoolAllocationMode + :param key_vault_reference: A reference to the Azure key vault associated + with the Batch account. + :type key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference + """ + + _validation = { + 'location': {'required': True}, + } + + _attribute_map = { + 'location': {'key': 'location', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, + 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, + 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, + } + + def __init__(self, *, location: str, tags=None, auto_storage=None, pool_allocation_mode=None, key_vault_reference=None, **kwargs) -> None: + super(BatchAccountCreateParameters, self).__init__(**kwargs) + self.location = location + self.tags = tags + self.auto_storage = auto_storage + self.pool_allocation_mode = pool_allocation_mode + self.key_vault_reference = key_vault_reference + + +class BatchAccountKeys(Model): + """A set of Azure Batch account keys. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar account_name: The Batch account name. + :vartype account_name: str + :ivar primary: The primary key associated with the account. + :vartype primary: str + :ivar secondary: The secondary key associated with the account. 
+ :vartype secondary: str + """ + + _validation = { + 'account_name': {'readonly': True}, + 'primary': {'readonly': True}, + 'secondary': {'readonly': True}, + } + + _attribute_map = { + 'account_name': {'key': 'accountName', 'type': 'str'}, + 'primary': {'key': 'primary', 'type': 'str'}, 'secondary': {'key': 'secondary', 'type': 'str'}, } - def __init__(self): - super(BatchAccountKeys, self).__init__() + def __init__(self, **kwargs) -> None: + super(BatchAccountKeys, self).__init__(**kwargs) self.account_name = None self.primary = None self.secondary = None @@ -280,8 +638,10 @@ def __init__(self): class BatchAccountRegenerateKeyParameters(Model): """Parameters supplied to the RegenerateKey operation. - :param key_name: The type of account key to regenerate. Possible values - include: 'Primary', 'Secondary' + All required parameters must be populated in order to send to Azure. + + :param key_name: Required. The type of account key to regenerate. Possible + values include: 'Primary', 'Secondary' :type key_name: str or ~azure.mgmt.batch.models.AccountKeyType """ @@ -293,8 +653,8 @@ class BatchAccountRegenerateKeyParameters(Model): 'key_name': {'key': 'keyName', 'type': 'AccountKeyType'}, } - def __init__(self, key_name): - super(BatchAccountRegenerateKeyParameters, self).__init__() + def __init__(self, *, key_name, **kwargs) -> None: + super(BatchAccountRegenerateKeyParameters, self).__init__(**kwargs) self.key_name = key_name @@ -312,8 +672,8 @@ class BatchAccountUpdateParameters(Model): 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageBaseProperties'}, } - def __init__(self, tags=None, auto_storage=None): - super(BatchAccountUpdateParameters, self).__init__() + def __init__(self, *, tags=None, auto_storage=None, **kwargs) -> None: + super(BatchAccountUpdateParameters, self).__init__(**kwargs) self.tags = tags self.auto_storage = auto_storage @@ -337,11 +697,100 @@ class BatchLocationQuota(Model): 'account_quota': {'key': 'accountQuota', 'type': 'int'}, } - def __init__(self): - super(BatchLocationQuota, self).__init__() + def __init__(self, **kwargs) -> None: + super(BatchLocationQuota, self).__init__(**kwargs) self.account_quota = None +class Certificate(ProxyResource): + """Contains information about a certificate. + + Variables are only populated by the server, and will be ignored when + sending a request. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param thumbprint_algorithm: The algorithm of the certificate thumbprint. + This must match the first portion of the certificate name. Currently + required to be 'SHA1'. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate. This must match the + thumbprint from the name. + :type thumbprint: str + :param format: The format of the certificate - either Pfx or Cer. If + omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' + :type format: str or ~azure.mgmt.batch.models.CertificateFormat + :ivar provisioning_state: The provisioned state of the resource. Possible + values include: 'Succeeded', 'Deleting', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.CertificateProvisioningState + :ivar provisioning_state_transition_time: The time at which the + certificate entered its current state. 
+ :vartype provisioning_state_transition_time: datetime + :ivar previous_provisioning_state: The previous provisioned state of the + resource. Possible values include: 'Succeeded', 'Deleting', 'Failed' + :vartype previous_provisioning_state: str or + ~azure.mgmt.batch.models.CertificateProvisioningState + :ivar previous_provisioning_state_transition_time: The time at which the + certificate entered its previous state. + :vartype previous_provisioning_state_transition_time: datetime + :ivar public_data: The public key of the certificate. + :vartype public_data: str + :ivar delete_certificate_error: The error which occurred while deleting + the certificate. This is only returned when the certificate + provisioningState is 'Failed'. + :vartype delete_certificate_error: + ~azure.mgmt.batch.models.DeleteCertificateError + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'provisioning_state_transition_time': {'readonly': True}, + 'previous_provisioning_state': {'readonly': True}, + 'previous_provisioning_state_transition_time': {'readonly': True}, + 'public_data': {'readonly': True}, + 'delete_certificate_error': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, + 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, + 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'CertificateProvisioningState'}, + 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, + 'previous_provisioning_state': {'key': 'properties.previousProvisioningState', 'type': 'CertificateProvisioningState'}, + 'previous_provisioning_state_transition_time': {'key': 'properties.previousProvisioningStateTransitionTime', 'type': 'iso-8601'}, + 'public_data': {'key': 'properties.publicData', 'type': 'str'}, + 'delete_certificate_error': {'key': 'properties.deleteCertificateError', 'type': 'DeleteCertificateError'}, + } + + def __init__(self, *, thumbprint_algorithm: str=None, thumbprint: str=None, format=None, **kwargs) -> None: + super(Certificate, self).__init__(**kwargs) + self.thumbprint_algorithm = thumbprint_algorithm + self.thumbprint = thumbprint + self.format = format + self.provisioning_state = None + self.provisioning_state_transition_time = None + self.previous_provisioning_state = None + self.previous_provisioning_state_transition_time = None + self.public_data = None + self.delete_certificate_error = None + + class CertificateBaseProperties(Model): """CertificateBaseProperties. 
@@ -363,36 +812,102 @@ class CertificateBaseProperties(Model): 'format': {'key': 'format', 'type': 'CertificateFormat'}, } - def __init__(self, thumbprint_algorithm=None, thumbprint=None, format=None): - super(CertificateBaseProperties, self).__init__() + def __init__(self, *, thumbprint_algorithm: str=None, thumbprint: str=None, format=None, **kwargs) -> None: + super(CertificateBaseProperties, self).__init__(**kwargs) self.thumbprint_algorithm = thumbprint_algorithm self.thumbprint = thumbprint self.format = format -class CertificateReference(Model): - """A reference to a certificate to be installed on compute nodes in a pool. - This must exist inside the same account as the pool. +class CertificateCreateOrUpdateParameters(ProxyResource): + """Contains information about a certificate. - :param id: The fully qualified ID of the certificate to install on the - pool. This must be inside the same batch account as the pool. - :type id: str - :param store_location: The location of the certificate store on the - compute node into which to install the certificate. The default value is - currentUser. This property is applicable only for pools configured with - Windows nodes (that is, created with cloudServiceConfiguration, or with - virtualMachineConfiguration using a Windows image reference). For Linux - compute nodes, the certificates are stored in a directory inside the task - working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is - supplied to the task to query for this location. For certificates with - visibility of 'remoteUser', a 'certs' directory is created in the user's - home directory (e.g., /home/{user-name}/certs) and certificates are placed - in that directory. Possible values include: 'CurrentUser', 'LocalMachine' - :type store_location: str or - ~azure.mgmt.batch.models.CertificateStoreLocation - :param store_name: The name of the certificate store on the compute node - into which to install the certificate. This property is applicable only - for pools configured with Windows nodes (that is, created with + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The ID of the resource. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. + :vartype type: str + :ivar etag: The ETag of the resource, used for concurrency statements. + :vartype etag: str + :param thumbprint_algorithm: The algorithm of the certificate thumbprint. + This must match the first portion of the certificate name. Currently + required to be 'SHA1'. + :type thumbprint_algorithm: str + :param thumbprint: The thumbprint of the certificate. This must match the + thumbprint from the name. + :type thumbprint: str + :param format: The format of the certificate - either Pfx or Cer. If + omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' + :type format: str or ~azure.mgmt.batch.models.CertificateFormat + :param data: Required. The base64-encoded contents of the certificate. The + maximum size is 10KB. + :type data: str + :param password: The password to access the certificate's private key. + This is required if the certificate format is pfx and must be omitted if + the certificate format is cer. 
+ :type password: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, + 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, + 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, + 'data': {'key': 'properties.data', 'type': 'str'}, + 'password': {'key': 'properties.password', 'type': 'str'}, + } + + def __init__(self, *, data: str, thumbprint_algorithm: str=None, thumbprint: str=None, format=None, password: str=None, **kwargs) -> None: + super(CertificateCreateOrUpdateParameters, self).__init__(**kwargs) + self.thumbprint_algorithm = thumbprint_algorithm + self.thumbprint = thumbprint + self.format = format + self.data = data + self.password = password + + +class CertificateReference(Model): + """A reference to a certificate to be installed on compute nodes in a pool. + This must exist inside the same account as the pool. + + All required parameters must be populated in order to send to Azure. + + :param id: Required. The fully qualified ID of the certificate to install + on the pool. This must be inside the same batch account as the pool. + :type id: str + :param store_location: The location of the certificate store on the + compute node into which to install the certificate. The default value is + currentUser. This property is applicable only for pools configured with + Windows nodes (that is, created with cloudServiceConfiguration, or with + virtualMachineConfiguration using a Windows image reference). For Linux + compute nodes, the certificates are stored in a directory inside the task + working directory and an environment variable AZ_BATCH_CERTIFICATES_DIR is + supplied to the task to query for this location. For certificates with + visibility of 'remoteUser', a 'certs' directory is created in the user's + home directory (e.g., /home/{user-name}/certs) and certificates are placed + in that directory. Possible values include: 'CurrentUser', 'LocalMachine' + :type store_location: str or + ~azure.mgmt.batch.models.CertificateStoreLocation + :param store_name: The name of the certificate store on the compute node + into which to install the certificate. This property is applicable only + for pools configured with Windows nodes (that is, created with cloudServiceConfiguration, or with virtualMachineConfiguration using a Windows image reference). Common store names include: My, Root, CA, Trust, Disallowed, TrustedPeople, TrustedPublisher, AuthRoot, AddressBook, but @@ -415,8 +930,8 @@ class CertificateReference(Model): 'visibility': {'key': 'visibility', 'type': '[CertificateVisibility]'}, } - def __init__(self, id, store_location=None, store_name=None, visibility=None): - super(CertificateReference, self).__init__() + def __init__(self, *, id: str, store_location=None, store_name: str=None, visibility=None, **kwargs) -> None: + super(CertificateReference, self).__init__(**kwargs) self.id = id self.store_location = store_location self.store_name = store_name @@ -429,9 +944,11 @@ class CheckNameAvailabilityParameters(Model): Variables are only populated by the server, and will be ignored when sending a request. 
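A sketch of how the new CertificateCreateOrUpdateParameters might be populated from a local PFX file; the file name, thumbprint and password are placeholders, and data must stay under the 10KB limit noted above:

import base64

from azure.mgmt.batch import models

with open('my-cert.pfx', 'rb') as pfx:
    encoded = base64.b64encode(pfx.read()).decode('utf-8')

params = models.CertificateCreateOrUpdateParameters(
    data=encoded,                     # required; base64-encoded contents
    thumbprint_algorithm='SHA1',      # currently required to be 'SHA1'
    thumbprint='<hex-thumbprint>',    # must match the thumbprint in the name
    format='Pfx',                     # 'Pfx' (default) or 'Cer'
    password='<pfx-password>')        # required for Pfx, omitted for Cer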
- :param name: The name to check for availability + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name to check for availability :type name: str - :ivar type: The resource type. Must be set to + :ivar type: Required. The resource type. Must be set to Microsoft.Batch/batchAccounts. Default value: "Microsoft.Batch/batchAccounts" . :vartype type: str @@ -449,8 +966,8 @@ class CheckNameAvailabilityParameters(Model): type = "Microsoft.Batch/batchAccounts" - def __init__(self, name): - super(CheckNameAvailabilityParameters, self).__init__() + def __init__(self, *, name: str, **kwargs) -> None: + super(CheckNameAvailabilityParameters, self).__init__(**kwargs) self.name = name @@ -485,23 +1002,85 @@ class CheckNameAvailabilityResult(Model): 'message': {'key': 'message', 'type': 'str'}, } - def __init__(self): - super(CheckNameAvailabilityResult, self).__init__() + def __init__(self, **kwargs) -> None: + super(CheckNameAvailabilityResult, self).__init__(**kwargs) self.name_available = None self.reason = None self.message = None +class CloudError(Model): + """An error response from the Batch service. + + :param error: + :type error: ~azure.mgmt.batch.models.CloudErrorBody + """ + + _attribute_map = { + 'error': {'key': 'error', 'type': 'CloudErrorBody'}, + } + + def __init__(self, *, error=None, **kwargs) -> None: + super(CloudError, self).__init__(**kwargs) + self.error = error + + +class CloudErrorException(HttpOperationError): + """Server responsed with exception of type: 'CloudError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args) + + +class CloudErrorBody(Model): + """An error response from the Batch service. + + :param code: An identifier for the error. Codes are invariant and are + intended to be consumed programmatically. + :type code: str + :param message: A message describing the error, intended to be suitable + for display in a user interface. + :type message: str + :param target: The target of the particular error. For example, the name + of the property in error. + :type target: str + :param details: A list of additional details about the error. + :type details: list[~azure.mgmt.batch.models.CloudErrorBody] + """ + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[CloudErrorBody]'}, + } + + def __init__(self, *, code: str=None, message: str=None, target: str=None, details=None, **kwargs) -> None: + super(CloudErrorBody, self).__init__(**kwargs) + self.code = code + self.message = message + self.target = target + self.details = details + + class CloudServiceConfiguration(Model): """The configuration for nodes in a pool based on the Azure Cloud Services platform. - :param os_family: The Azure Guest OS family to be installed on the virtual - machines in the pool. Possible values are: 2 - OS Family 2, equivalent to - Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to Windows Server - 2012. 4 - OS Family 4, equivalent to Windows Server 2012 R2. 5 - OS Family - 5, equivalent to Windows Server 2016. For more information, see Azure - Guest OS Releases + All required parameters must be populated in order to send to Azure. + + :param os_family: Required. 
The Azure Guest OS family to be installed on + the virtual machines in the pool. Possible values are: 2 - OS Family 2, + equivalent to Windows Server 2008 R2 SP1. 3 - OS Family 3, equivalent to + Windows Server 2012. 4 - OS Family 4, equivalent to Windows Server 2012 + R2. 5 - OS Family 5, equivalent to Windows Server 2016. 6 - OS Family 6, + equivalent to Windows Server 2019. For more information, see Azure Guest + OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). :type os_family: str :param os_version: The Azure Guest OS version to be installed on the @@ -519,8 +1098,8 @@ class CloudServiceConfiguration(Model): 'os_version': {'key': 'osVersion', 'type': 'str'}, } - def __init__(self, os_family, os_version=None): - super(CloudServiceConfiguration, self).__init__() + def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None: + super(CloudServiceConfiguration, self).__init__(**kwargs) self.os_family = os_family self.os_version = os_version @@ -531,7 +1110,9 @@ class ContainerConfiguration(Model): Variables are only populated by the server, and will be ignored when sending a request. - :ivar type: The container technology to be used. Default value: + All required parameters must be populated in order to send to Azure. + + :ivar type: Required. The container technology to be used. Default value: "DockerCompatible" . :vartype type: str :param container_image_names: The collection of container image names. @@ -559,8 +1140,8 @@ class ContainerConfiguration(Model): type = "DockerCompatible" - def __init__(self, container_image_names=None, container_registries=None): - super(ContainerConfiguration, self).__init__() + def __init__(self, *, container_image_names=None, container_registries=None, **kwargs) -> None: + super(ContainerConfiguration, self).__init__(**kwargs) self.container_image_names = container_image_names self.container_registries = container_registries @@ -568,12 +1149,14 @@ def __init__(self, container_image_names=None, container_registries=None): class ContainerRegistry(Model): """A private container registry. + All required parameters must be populated in order to send to Azure. + :param registry_server: The registry URL. If omitted, the default is "docker.io". :type registry_server: str - :param user_name: The user name to log into the registry server. + :param user_name: Required. The user name to log into the registry server. :type user_name: str - :param password: The password to log into the registry server. + :param password: Required. The password to log into the registry server. :type password: str """ @@ -588,8 +1171,8 @@ class ContainerRegistry(Model): 'password': {'key': 'password', 'type': 'str'}, } - def __init__(self, user_name, password, registry_server=None): - super(ContainerRegistry, self).__init__() + def __init__(self, *, user_name: str, password: str, registry_server: str=None, **kwargs) -> None: + super(ContainerRegistry, self).__init__(**kwargs) self.registry_server = registry_server self.user_name = user_name self.password = password @@ -599,9 +1182,11 @@ class DataDisk(Model): """Data Disk settings which will be used by the data disks associated to Compute Nodes in the pool. - :param lun: The logical unit number. The lun is used to uniquely identify - each data disk. If attaching multiple disks, each should have a distinct - lun. + All required parameters must be populated in order to send to Azure. + + :param lun: Required. The logical unit number. 
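For reference, a short sketch of wiring a private registry into the container configuration; the registry server, user name and image name are placeholders:

from azure.mgmt.batch import models

registry = models.ContainerRegistry(
    user_name='builduser',
    password='<registry-password>',
    registry_server='myregistry.azurecr.io')   # defaults to "docker.io" if omitted

container_conf = models.ContainerConfiguration(
    container_image_names=['myregistry.azurecr.io/batch/worker:latest'],
    container_registries=[registry])

The resulting object would typically be assigned to the container configuration of a pool that uses virtualMachineConfiguration.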
The lun is used to uniquely + identify each data disk. If attaching multiple disks, each should have a + distinct lun. :type lun: int :param caching: The type of caching to be enabled for the data disks. Values are: @@ -613,8 +1198,8 @@ class DataDisk(Model): https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. Possible values include: 'None', 'ReadOnly', 'ReadWrite' :type caching: str or ~azure.mgmt.batch.models.CachingType - :param disk_size_gb: The initial disk size in GB when creating new data - disk. + :param disk_size_gb: Required. The initial disk size in GB when creating + new data disk. :type disk_size_gb: int :param storage_account_type: The storage account type to be used for the data disk. If omitted, the default is "Standard_LRS". Values are: @@ -638,8 +1223,8 @@ class DataDisk(Model): 'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountType'}, } - def __init__(self, lun, disk_size_gb, caching=None, storage_account_type=None): - super(DataDisk, self).__init__() + def __init__(self, *, lun: int, disk_size_gb: int, caching=None, storage_account_type=None, **kwargs) -> None: + super(DataDisk, self).__init__(**kwargs) self.lun = lun self.caching = caching self.disk_size_gb = disk_size_gb @@ -649,11 +1234,13 @@ def __init__(self, lun, disk_size_gb, caching=None, storage_account_type=None): class DeleteCertificateError(Model): """An error response from the Batch service. - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. :type message: str :param target: The target of the particular error. For example, the name of the property in error. @@ -674,8 +1261,8 @@ class DeleteCertificateError(Model): 'details': {'key': 'details', 'type': '[DeleteCertificateError]'}, } - def __init__(self, code, message, target=None, details=None): - super(DeleteCertificateError, self).__init__() + def __init__(self, *, code: str, message: str, target: str=None, details=None, **kwargs) -> None: + super(DeleteCertificateError, self).__init__(**kwargs) self.code = code self.message = message self.target = target @@ -704,8 +1291,8 @@ class DeploymentConfiguration(Model): 'virtual_machine_configuration': {'key': 'virtualMachineConfiguration', 'type': 'VirtualMachineConfiguration'}, } - def __init__(self, cloud_service_configuration=None, virtual_machine_configuration=None): - super(DeploymentConfiguration, self).__init__() + def __init__(self, *, cloud_service_configuration=None, virtual_machine_configuration=None, **kwargs) -> None: + super(DeploymentConfiguration, self).__init__(**kwargs) self.cloud_service_configuration = cloud_service_configuration self.virtual_machine_configuration = virtual_machine_configuration @@ -713,7 +1300,9 @@ def __init__(self, cloud_service_configuration=None, virtual_machine_configurati class EnvironmentSetting(Model): """An environment variable to be set on a task process. - :param name: The name of the environment variable. + All required parameters must be populated in order to send to Azure. 
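A quick sketch of empty data disks of the kind that could be attached through a pool's virtualMachineConfiguration; the sizes and luns are illustrative:

from azure.mgmt.batch import models

# Each disk needs a distinct lun; disk_size_gb is the only other required field.
data_disks = [
    models.DataDisk(lun=0, disk_size_gb=64, caching='ReadWrite'),
    models.DataDisk(lun=1, disk_size_gb=512, storage_account_type='Standard_LRS'),
]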
+ + :param name: Required. The name of the environment variable. :type name: str :param value: The value of the environment variable. :type value: str @@ -728,8 +1317,8 @@ class EnvironmentSetting(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, name, value=None): - super(EnvironmentSetting, self).__init__() + def __init__(self, *, name: str, value: str=None, **kwargs) -> None: + super(EnvironmentSetting, self).__init__(**kwargs) self.name = name self.value = value @@ -767,8 +1356,8 @@ class FixedScaleSettings(Model): 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, } - def __init__(self, resize_timeout=None, target_dedicated_nodes=None, target_low_priority_nodes=None, node_deallocation_option=None): - super(FixedScaleSettings, self).__init__() + def __init__(self, *, resize_timeout=None, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, node_deallocation_option=None, **kwargs) -> None: + super(FixedScaleSettings, self).__init__(**kwargs) self.resize_timeout = resize_timeout self.target_dedicated_nodes = target_dedicated_nodes self.target_low_priority_nodes = target_low_priority_nodes @@ -815,8 +1404,8 @@ class ImageReference(Model): 'id': {'key': 'id', 'type': 'str'}, } - def __init__(self, publisher=None, offer=None, sku=None, version=None, id=None): - super(ImageReference, self).__init__() + def __init__(self, *, publisher: str=None, offer: str=None, sku: str=None, version: str=None, id: str=None, **kwargs) -> None: + super(ImageReference, self).__init__(**kwargs) self.publisher = publisher self.offer = offer self.sku = sku @@ -828,30 +1417,32 @@ class InboundNatPool(Model): """A inbound NAT pool that can be used to address specific ports on compute nodes in a Batch pool externally. - :param name: The name of the endpoint. The name must be unique within a - Batch pool, can contain letters, numbers, underscores, periods, and - hyphens. Names must start with a letter or number, must end with a letter, - number, or underscore, and cannot exceed 77 characters. If any invalid - values are provided the request fails with HTTP status code 400. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the endpoint. The name must be unique + within a Batch pool, can contain letters, numbers, underscores, periods, + and hyphens. Names must start with a letter or number, must end with a + letter, number, or underscore, and cannot exceed 77 characters. If any + invalid values are provided the request fails with HTTP status code 400. :type name: str - :param protocol: The protocol of the endpoint. Possible values include: - 'TCP', 'UDP' + :param protocol: Required. The protocol of the endpoint. Possible values + include: 'TCP', 'UDP' :type protocol: str or ~azure.mgmt.batch.models.InboundEndpointProtocol - :param backend_port: The port number on the compute node. This must be - unique within a Batch pool. Acceptable values are between 1 and 65535 - except for 22, 3389, 29876 and 29877 as these are reserved. If any + :param backend_port: Required. The port number on the compute node. This + must be unique within a Batch pool. Acceptable values are between 1 and + 65535 except for 22, 3389, 29876 and 29877 as these are reserved. If any reserved values are provided the request fails with HTTP status code 400. 
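A sketch of the two models above as they are commonly combined when sizing a pool; the marketplace image values are illustrative:

from datetime import timedelta

from azure.mgmt.batch import models

image = models.ImageReference(
    publisher='Canonical', offer='UbuntuServer', sku='18.04-LTS', version='latest')

fixed = models.FixedScaleSettings(
    target_dedicated_nodes=2,
    target_low_priority_nodes=0,
    resize_timeout=timedelta(minutes=15))   # minimum allowed is 5 minutes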
:type backend_port: int - :param frontend_port_range_start: The first port number in the range of - external ports that will be used to provide inbound access to the + :param frontend_port_range_start: Required. The first port number in the + range of external ports that will be used to provide inbound access to the backendPort on individual compute nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved. All ranges within a pool must be distinct and cannot overlap. If any reserved or overlapping values are provided the request fails with HTTP status code 400. :type frontend_port_range_start: int - :param frontend_port_range_end: The last port number in the range of - external ports that will be used to provide inbound access to the + :param frontend_port_range_end: Required. The last port number in the + range of external ports that will be used to provide inbound access to the backendPort on individual compute nodes. Acceptable values range between 1 and 65534 except ports from 50000 to 55000 which are reserved by the Batch service. All ranges within a pool must be distinct and cannot overlap. If @@ -886,8 +1477,8 @@ class InboundNatPool(Model): 'network_security_group_rules': {'key': 'networkSecurityGroupRules', 'type': '[NetworkSecurityGroupRule]'}, } - def __init__(self, name, protocol, backend_port, frontend_port_range_start, frontend_port_range_end, network_security_group_rules=None): - super(InboundNatPool, self).__init__() + def __init__(self, *, name: str, protocol, backend_port: int, frontend_port_range_start: int, frontend_port_range_end: int, network_security_group_rules=None, **kwargs) -> None: + super(InboundNatPool, self).__init__(**kwargs) self.name = name self.protocol = protocol self.backend_port = backend_port @@ -899,11 +1490,13 @@ def __init__(self, name, protocol, backend_port, frontend_port_range_start, fron class KeyVaultReference(Model): """Identifies the Azure key vault associated with a Batch account. - :param id: The resource ID of the Azure key vault associated with the - Batch account. + All required parameters must be populated in order to send to Azure. + + :param id: Required. The resource ID of the Azure key vault associated + with the Batch account. :type id: str - :param url: The URL of the Azure key vault associated with the Batch - account. + :param url: Required. The URL of the Azure key vault associated with the + Batch account. :type url: str """ @@ -917,8 +1510,8 @@ class KeyVaultReference(Model): 'url': {'key': 'url', 'type': 'str'}, } - def __init__(self, id, url): - super(KeyVaultReference, self).__init__() + def __init__(self, *, id: str, url: str, **kwargs) -> None: + super(KeyVaultReference, self).__init__(**kwargs) self.id = id self.url = url @@ -951,8 +1544,8 @@ class LinuxUserConfiguration(Model): 'ssh_private_key': {'key': 'sshPrivateKey', 'type': 'str'}, } - def __init__(self, uid=None, gid=None, ssh_private_key=None): - super(LinuxUserConfiguration, self).__init__() + def __init__(self, *, uid: int=None, gid: int=None, ssh_private_key: str=None, **kwargs) -> None: + super(LinuxUserConfiguration, self).__init__(**kwargs) self.uid = uid self.gid = gid self.ssh_private_key = ssh_private_key @@ -964,9 +1557,11 @@ class MetadataItem(Model): The Batch service does not assign any meaning to this metadata; it is solely for the use of user code. - :param name: The name of the metadata item. + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the metadata item. :type name: str - :param value: The value of the metadata item. + :param value: Required. The value of the metadata item. :type value: str """ @@ -980,8 +1575,8 @@ class MetadataItem(Model): 'value': {'key': 'value', 'type': 'str'}, } - def __init__(self, name, value): - super(MetadataItem, self).__init__() + def __init__(self, *, name: str, value: str, **kwargs) -> None: + super(MetadataItem, self).__init__(**kwargs) self.name = name self.value = value @@ -1027,8 +1622,8 @@ class NetworkConfiguration(Model): 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, } - def __init__(self, subnet_id=None, endpoint_configuration=None): - super(NetworkConfiguration, self).__init__() + def __init__(self, *, subnet_id: str=None, endpoint_configuration=None, **kwargs) -> None: + super(NetworkConfiguration, self).__init__(**kwargs) self.subnet_id = subnet_id self.endpoint_configuration = endpoint_configuration @@ -1036,22 +1631,25 @@ def __init__(self, subnet_id=None, endpoint_configuration=None): class NetworkSecurityGroupRule(Model): """A network security group rule to apply to an inbound endpoint. - :param priority: The priority for this rule. Priorities within a pool must - be unique and are evaluated in order of priority. The lower the number the - higher the priority. For example, rules could be specified with order - numbers of 150, 250, and 350. The rule with the order number of 150 takes - precedence over the rule that has an order of 250. Allowed priorities are - 150 to 3500. If any reserved or duplicate values are provided the request - fails with HTTP status code 400. + All required parameters must be populated in order to send to Azure. + + :param priority: Required. The priority for this rule. Priorities within a + pool must be unique and are evaluated in order of priority. The lower the + number the higher the priority. For example, rules could be specified with + order numbers of 150, 250, and 350. The rule with the order number of 150 + takes precedence over the rule that has an order of 250. Allowed + priorities are 150 to 3500. If any reserved or duplicate values are + provided the request fails with HTTP status code 400. :type priority: int - :param access: The action that should be taken for a specified IP address, - subnet range or tag. Possible values include: 'Allow', 'Deny' + :param access: Required. The action that should be taken for a specified + IP address, subnet range or tag. Possible values include: 'Allow', 'Deny' :type access: str or ~azure.mgmt.batch.models.NetworkSecurityGroupRuleAccess - :param source_address_prefix: The source address prefix or tag to match - for the rule. Valid values are a single IP address (i.e. 10.10.10.10), IP - subnet (i.e. 192.168.1.0/24), default tag, or * (for all addresses). If - any other values are provided the request fails with HTTP status code 400. + :param source_address_prefix: Required. The source address prefix or tag + to match for the rule. Valid values are a single IP address (i.e. + 10.10.10.10), IP subnet (i.e. 192.168.1.0/24), default tag, or * (for all + addresses). If any other values are provided the request fails with HTTP + status code 400. 
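For completeness, a small sketch of the keyword-only MetadataItem and KeyVaultReference constructors; all identifiers are placeholders:

from azure.mgmt.batch import models

metadata = [models.MetadataItem(name='team', value='render-farm')]

key_vault = models.KeyVaultReference(
    id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
       'Microsoft.KeyVault/vaults/<vault-name>',
    url='https://<vault-name>.vault.azure.net/')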
:type source_address_prefix: str """ @@ -1067,8 +1665,8 @@ class NetworkSecurityGroupRule(Model): 'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'}, } - def __init__(self, priority, access, source_address_prefix): - super(NetworkSecurityGroupRule, self).__init__() + def __init__(self, *, priority: int, access, source_address_prefix: str, **kwargs) -> None: + super(NetworkSecurityGroupRule, self).__init__(**kwargs) self.priority = priority self.access = access self.source_address_prefix = source_address_prefix @@ -1095,8 +1693,8 @@ class Operation(Model): 'properties': {'key': 'properties', 'type': 'object'}, } - def __init__(self, name=None, display=None, origin=None, properties=None): - super(Operation, self).__init__() + def __init__(self, *, name: str=None, display=None, origin: str=None, properties=None, **kwargs) -> None: + super(Operation, self).__init__(**kwargs) self.name = name self.display = display self.origin = origin @@ -1124,40 +1722,16 @@ class OperationDisplay(Model): 'description': {'key': 'description', 'type': 'str'}, } - def __init__(self, provider=None, operation=None, resource=None, description=None): - super(OperationDisplay, self).__init__() + def __init__(self, *, provider: str=None, operation: str=None, resource: str=None, description: str=None, **kwargs) -> None: + super(OperationDisplay, self).__init__(**kwargs) self.provider = provider self.operation = operation self.resource = resource self.description = description -class PoolEndpointConfiguration(Model): - """The endpoint configuration for a pool. - - :param inbound_nat_pools: A list of inbound NAT pools that can be used to - address specific ports on an individual compute node externally. The - maximum number of inbound NAT pools per Batch pool is 5. If the maximum - number of inbound NAT pools is exceeded the request fails with HTTP status - code 400. - :type inbound_nat_pools: list[~azure.mgmt.batch.models.InboundNatPool] - """ - - _validation = { - 'inbound_nat_pools': {'required': True}, - } - - _attribute_map = { - 'inbound_nat_pools': {'key': 'inboundNatPools', 'type': '[InboundNatPool]'}, - } - - def __init__(self, inbound_nat_pools): - super(PoolEndpointConfiguration, self).__init__() - self.inbound_nat_pools = inbound_nat_pools - - -class ProxyResource(Model): - """A definition of an Azure resource. +class Pool(ProxyResource): + """Contains information about a pool. Variables are only populated by the server, and will be ignored when sending a request. @@ -1170,1151 +1744,758 @@ class ProxyResource(Model): :vartype type: str :ivar etag: The ETag of the resource, used for concurrency statements. :vartype etag: str + :param display_name: The display name for the pool. The display name need + not be unique and can contain any Unicode characters up to a maximum + length of 1024. + :type display_name: str + :ivar last_modified: The last modified time of the pool. This is the last + time at which the pool level data, such as the targetDedicatedNodes or + autoScaleSettings, changed. It does not factor in node-level changes such + as a compute node changing state. + :vartype last_modified: datetime + :ivar creation_time: The creation time of the pool. + :vartype creation_time: datetime + :ivar provisioning_state: The current state of the pool. Possible values + include: 'Succeeded', 'Deleting' + :vartype provisioning_state: str or + ~azure.mgmt.batch.models.PoolProvisioningState + :ivar provisioning_state_transition_time: The time at which the pool + entered its current state. 
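Putting the networking models together: a sketch of one inbound NAT pool guarded by a single security group rule and attached to a pool network configuration. The subnet ID is a placeholder, and the port values simply avoid the reserved ranges called out above:

from azure.mgmt.batch import models

rule = models.NetworkSecurityGroupRule(
    priority=150,                          # allowed range is 150 to 3500
    access='Allow',
    source_address_prefix='10.0.0.0/24')

nat_pool = models.InboundNatPool(
    name='app-endpoint',
    protocol='TCP',
    backend_port=8080,                     # 22, 3389, 29876 and 29877 are reserved
    frontend_port_range_start=15000,
    frontend_port_range_end=15100,         # 50000-55000 is reserved by Batch
    network_security_group_rules=[rule])

network_conf = models.NetworkConfiguration(
    subnet_id='/subscriptions/<sub-id>/resourceGroups/<rg>/providers/'
              'Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>',
    endpoint_configuration=models.PoolEndpointConfiguration(
        inbound_nat_pools=[nat_pool]))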
+ :vartype provisioning_state_transition_time: datetime + :ivar allocation_state: Whether the pool is resizing. Possible values + include: 'Steady', 'Resizing', 'Stopping' + :vartype allocation_state: str or ~azure.mgmt.batch.models.AllocationState + :ivar allocation_state_transition_time: The time at which the pool entered + its current allocation state. + :vartype allocation_state_transition_time: datetime + :param vm_size: The size of virtual machines in the pool. All VMs in a + pool are the same size. For information about available sizes of virtual + machines for Cloud Services pools (pools created with + cloudServiceConfiguration), see Sizes for Cloud Services + (http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). + Batch supports all Cloud Services VM sizes except ExtraSmall. For + information about available VM sizes for pools using images from the + Virtual Machines Marketplace (pools created with + virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) + (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) + or Sizes for Virtual Machines (Windows) + (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). + Batch supports all Azure VM sizes except STANDARD_A0 and those with + premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + :type vm_size: str + :param deployment_configuration: This property describes how the pool + nodes will be deployed - using Cloud Services or Virtual Machines. Using + CloudServiceConfiguration specifies that the nodes should be creating + using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses + Azure Virtual Machines (IaaS). + :type deployment_configuration: + ~azure.mgmt.batch.models.DeploymentConfiguration + :ivar current_dedicated_nodes: The number of compute nodes currently in + the pool. + :vartype current_dedicated_nodes: int + :ivar current_low_priority_nodes: The number of low priority compute nodes + currently in the pool. + :vartype current_low_priority_nodes: int + :param scale_settings: Settings which configure the number of nodes in the + pool. + :type scale_settings: ~azure.mgmt.batch.models.ScaleSettings + :ivar auto_scale_run: The results and errors from the last execution of + the autoscale formula. This property is set only if the pool automatically + scales, i.e. autoScaleSettings are used. + :vartype auto_scale_run: ~azure.mgmt.batch.models.AutoScaleRun + :param inter_node_communication: Whether the pool permits direct + communication between nodes. This imposes restrictions on which nodes can + be assigned to the pool. Enabling this value can reduce the chance of the + requested number of nodes to be allocated in the pool. If not specified, + this value defaults to 'Disabled'. Possible values include: 'Enabled', + 'Disabled' + :type inter_node_communication: str or + ~azure.mgmt.batch.models.InterNodeCommunicationState + :param network_configuration: The network configuration for the pool. + :type network_configuration: ~azure.mgmt.batch.models.NetworkConfiguration + :param max_tasks_per_node: The maximum number of tasks that can run + concurrently on a single compute node in the pool. The default value is 1. + The maximum value is the smaller of 4 times the number of cores of the + vmSize of the pool or 256. + :type max_tasks_per_node: int + :param task_scheduling_policy: How tasks are distributed across compute + nodes in a pool. If not specified, the default is spread. 
+ :type task_scheduling_policy: + ~azure.mgmt.batch.models.TaskSchedulingPolicy + :param user_accounts: The list of user accounts to be created on each node + in the pool. + :type user_accounts: list[~azure.mgmt.batch.models.UserAccount] + :param metadata: A list of name-value pairs associated with the pool as + metadata. The Batch service does not assign any meaning to metadata; it is + solely for the use of user code. + :type metadata: list[~azure.mgmt.batch.models.MetadataItem] + :param start_task: A task specified to run on each compute node as it + joins the pool. In an PATCH (update) operation, this property can be set + to an empty object to remove the start task from the pool. + :type start_task: ~azure.mgmt.batch.models.StartTask + :param certificates: The list of certificates to be installed on each + compute node in the pool. For Windows compute nodes, the Batch service + installs the certificates to the specified certificate store and location. + For Linux compute nodes, the certificates are stored in a directory inside + the task working directory and an environment variable + AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this + location. For certificates with visibility of 'remoteUser', a 'certs' + directory is created in the user's home directory (e.g., + /home/{user-name}/certs) and certificates are placed in that directory. + :type certificates: list[~azure.mgmt.batch.models.CertificateReference] + :param application_packages: The list of application packages to be + installed on each compute node in the pool. Changes to application package + references affect all new compute nodes joining the pool, but do not + affect compute nodes that are already in the pool until they are rebooted + or reimaged. There is a maximum of 10 application package references on + any given pool. + :type application_packages: + list[~azure.mgmt.batch.models.ApplicationPackageReference] + :param application_licenses: The list of application licenses the Batch + service will make available on each compute node in the pool. The list of + application licenses must be a subset of available Batch service + application licenses. If a license is requested which is not supported, + pool creation will fail. + :type application_licenses: list[str] + :ivar resize_operation_status: Contains details about the current or last + completed resize operation. + :vartype resize_operation_status: + ~azure.mgmt.batch.models.ResizeOperationStatus """ _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - } - - def __init__(self): - super(ProxyResource, self).__init__() - self.id = None - self.name = None - self.type = None - self.etag = None - - -class ResizeError(Model): - """An error that occurred when resizing a pool. - - :param code: An identifier for the error. Codes are invariant and are - intended to be consumed programmatically. - :type code: str - :param message: A message describing the error, intended to be suitable - for display in a user interface. - :type message: str - :param details: Additional details about the error. 
- :type details: list[~azure.mgmt.batch.models.ResizeError] - """ - - _validation = { - 'code': {'required': True}, - 'message': {'required': True}, - } - - _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[ResizeError]'}, - } - - def __init__(self, code, message, details=None): - super(ResizeError, self).__init__() - self.code = code - self.message = message - self.details = details - - -class ResizeOperationStatus(Model): - """Details about the current or last completed resize operation. - - Describes either the current operation (if the pool AllocationState is - Resizing) or the previously completed operation (if the AllocationState is - Steady). - - :param target_dedicated_nodes: The desired number of dedicated compute - nodes in the pool. - :type target_dedicated_nodes: int - :param target_low_priority_nodes: The desired number of low-priority - compute nodes in the pool. - :type target_low_priority_nodes: int - :param resize_timeout: The timeout for allocation of compute nodes to the - pool or removal of compute nodes from the pool. The default value is 15 - minutes. The minimum value is 5 minutes. If you specify a value less than - 5 minutes, the Batch service returns an error; if you are calling the REST - API directly, the HTTP status code is 400 (Bad Request). - :type resize_timeout: timedelta - :param node_deallocation_option: Determines what to do with a node and its - running task(s) if the pool size is decreasing. The default value is - requeue. Possible values include: 'Requeue', 'Terminate', - 'TaskCompletion', 'RetainedData' - :type node_deallocation_option: str or - ~azure.mgmt.batch.models.ComputeNodeDeallocationOption - :param start_time: The time when this resize operation was started. - :type start_time: datetime - :param errors: Details of any errors encountered while performing the last - resize on the pool. This property is set only if an error occurred during - the last pool resize, and only when the pool allocationState is Steady. - :type errors: list[~azure.mgmt.batch.models.ResizeError] - """ - - _attribute_map = { - 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, - 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, - 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, - 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'errors': {'key': 'errors', 'type': '[ResizeError]'}, - } - - def __init__(self, target_dedicated_nodes=None, target_low_priority_nodes=None, resize_timeout=None, node_deallocation_option=None, start_time=None, errors=None): - super(ResizeOperationStatus, self).__init__() - self.target_dedicated_nodes = target_dedicated_nodes - self.target_low_priority_nodes = target_low_priority_nodes - self.resize_timeout = resize_timeout - self.node_deallocation_option = node_deallocation_option - self.start_time = start_time - self.errors = errors - - -class Resource(Model): - """A definition of an Azure resource. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar location: The location of the resource. 
- :vartype location: str - :ivar tags: The tags of the resource. - :vartype tags: dict[str, str] - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'location': {'readonly': True}, - 'tags': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - } - - def __init__(self): - super(Resource, self).__init__() - self.id = None - self.name = None - self.type = None - self.location = None - self.tags = None - - -class ResourceFile(Model): - """A single file or multiple files to be downloaded to a compute node. - - :param auto_storage_container_name: The storage container name in the auto - storage account. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. - :type auto_storage_container_name: str - :param storage_container_url: The URL of the blob container within Azure - Blob Storage. The autoStorageContainerName, storageContainerUrl and - httpUrl properties are mutually exclusive and one of them must be - specified. This URL must be readable and listable using anonymous access; - that is, the Batch service does not present any credentials when - downloading blobs from the container. There are two ways to get such a URL - for a container in Azure storage: include a Shared Access Signature (SAS) - granting read and list permissions on the container, or set the ACL for - the container to allow public access. - :type storage_container_url: str - :param http_url: The URL of the file to download. The - autoStorageContainerName, storageContainerUrl and httpUrl properties are - mutually exclusive and one of them must be specified. If the URL is Azure - Blob Storage, it must be readable using anonymous access; that is, the - Batch service does not present any credentials when downloading the blob. - There are two ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the blob, or - set the ACL for the blob or its container to allow public access. - :type http_url: str - :param blob_prefix: The blob prefix to use when downloading blobs from an - Azure Storage container. Only the blobs whose names begin with the - specified prefix will be downloaded. The property is valid only when - autoStorageContainerName or storageContainerUrl is used. This prefix can - be a partial filename or a subdirectory. If a prefix is not specified, all - the files in the container will be downloaded. - :type blob_prefix: str - :param file_path: The location on the compute node to which to download - the file, relative to the task's working directory. If the httpUrl - property is specified, the filePath is required and describes the path - which the file will be downloaded to, including the filename. Otherwise, - if the autoStorageContainerName or storageContainerUrl property is - specified, filePath is optional and is the directory to download the files - to. In the case where filePath is used as a directory, any directory - structure already associated with the input data will be retained in full - and appended to the specified filePath directory. The specified relative - path cannot break out of the task's working directory (for example by - using '..'). 
- :type file_path: str - :param file_mode: The file permission mode attribute in octal format. This - property applies only to files being downloaded to Linux compute nodes. It - will be ignored if it is specified for a resourceFile which will be - downloaded to a Windows node. If this property is not specified for a - Linux node, then a default value of 0770 is applied to the file. - :type file_mode: str - """ - - _attribute_map = { - 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, - 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, - 'http_url': {'key': 'httpUrl', 'type': 'str'}, - 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, - 'file_path': {'key': 'filePath', 'type': 'str'}, - 'file_mode': {'key': 'fileMode', 'type': 'str'}, - } - - def __init__(self, auto_storage_container_name=None, storage_container_url=None, http_url=None, blob_prefix=None, file_path=None, file_mode=None): - super(ResourceFile, self).__init__() - self.auto_storage_container_name = auto_storage_container_name - self.storage_container_url = storage_container_url - self.http_url = http_url - self.blob_prefix = blob_prefix - self.file_path = file_path - self.file_mode = file_mode - - -class ScaleSettings(Model): - """Scale settings for the pool. - - Defines the desired size of the pool. This can either be 'fixedScale' where - the requested targetDedicatedNodes is specified, or 'autoScale' which - defines a formula which is periodically reevaluated. If this property is - not specified, the pool will have a fixed scale with 0 - targetDedicatedNodes. - - :param fixed_scale: Fixed scale settings for the pool. This property and - autoScale are mutually exclusive and one of the properties must be - specified. - :type fixed_scale: ~azure.mgmt.batch.models.FixedScaleSettings - :param auto_scale: AutoScale settings for the pool. This property and - fixedScale are mutually exclusive and one of the properties must be - specified. - :type auto_scale: ~azure.mgmt.batch.models.AutoScaleSettings - """ - - _attribute_map = { - 'fixed_scale': {'key': 'fixedScale', 'type': 'FixedScaleSettings'}, - 'auto_scale': {'key': 'autoScale', 'type': 'AutoScaleSettings'}, - } - - def __init__(self, fixed_scale=None, auto_scale=None): - super(ScaleSettings, self).__init__() - self.fixed_scale = fixed_scale - self.auto_scale = auto_scale - - -class StartTask(Model): - """A task which is run when a compute node joins a pool in the Azure Batch - service, or when the compute node is rebooted or reimaged. - - :param command_line: The command line of the start task. The command line - does not run under a shell, and therefore cannot take advantage of shell - features such as environment variable expansion. If you want to take - advantage of such features, you should invoke the shell in the command - line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c - MyCommand" in Linux. Required if any other properties of the startTask are - specified. - :type command_line: str - :param resource_files: A list of files that the Batch service will - download to the compute node before running the command line. - :type resource_files: list[~azure.mgmt.batch.models.ResourceFile] - :param environment_settings: A list of environment variable settings for - the start task. - :type environment_settings: - list[~azure.mgmt.batch.models.EnvironmentSetting] - :param user_identity: The user identity under which the start task runs. 
- If omitted, the task runs as a non-administrative user unique to the task. - :type user_identity: ~azure.mgmt.batch.models.UserIdentity - :param max_task_retry_count: The maximum number of times the task may be - retried. The Batch service retries a task if its exit code is nonzero. - Note that this value specifically controls the number of retries. The - Batch service will try the task once, and may then retry up to this limit. - For example, if the maximum retry count is 3, Batch tries the task up to 4 - times (one initial try and 3 retries). If the maximum retry count is 0, - the Batch service does not retry the task. If the maximum retry count is - -1, the Batch service retries the task without limit. - :type max_task_retry_count: int - :param wait_for_success: Whether the Batch service should wait for the - start task to complete successfully (that is, to exit with exit code 0) - before scheduling any tasks on the compute node. If true and the start - task fails on a compute node, the Batch service retries the start task up - to its maximum retry count (maxTaskRetryCount). If the task has still not - completed successfully after all retries, then the Batch service marks the - compute node unusable, and will not schedule tasks to it. This condition - can be detected via the node state and scheduling error detail. If false, - the Batch service will not wait for the start task to complete. In this - case, other tasks can start executing on the compute node while the start - task is still running; and even if the start task fails, new tasks will - continue to be scheduled on the node. The default is false. - :type wait_for_success: bool - :param container_settings: The settings for the container under which the - start task runs. When this is specified, all directories recursively below - the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the - node) are mapped into the container, all task environment variables are - mapped into the container, and the task command line is executed in the - container. - :type container_settings: ~azure.mgmt.batch.models.TaskContainerSettings - """ - - _attribute_map = { - 'command_line': {'key': 'commandLine', 'type': 'str'}, - 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, - 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, - 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, - 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, - 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, - 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, - } - - def __init__(self, command_line=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count=None, wait_for_success=None, container_settings=None): - super(StartTask, self).__init__() - self.command_line = command_line - self.resource_files = resource_files - self.environment_settings = environment_settings - self.user_identity = user_identity - self.max_task_retry_count = max_task_retry_count - self.wait_for_success = wait_for_success - self.container_settings = container_settings - - -class TaskContainerSettings(Model): - """The container settings for a task. - - :param container_run_options: Additional options to the container create - command. These additional options are supplied as arguments to the "docker - create" command, in addition to those controlled by the Batch Service. 
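Although the hunks here show the removed positional constructors, ResourceFile and StartTask keep the same properties in their regenerated keyword-only form later in the module, so a start task sketch looks roughly like this; the SAS URL is a placeholder:

from azure.mgmt.batch import models

script = models.ResourceFile(
    http_url='https://<account>.blob.core.windows.net/scripts/setup.sh?<sas>',
    file_path='setup.sh',
    file_mode='0755')                      # only honoured on Linux nodes

start_task = models.StartTask(
    command_line='/bin/sh -c ./setup.sh',
    resource_files=[script],
    environment_settings=[models.EnvironmentSetting(name='REGION', value='westus2')],
    wait_for_success=True,                 # hold task scheduling until this succeeds
    max_task_retry_count=2)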
- :type container_run_options: str - :param image_name: The image to use to create the container in which the - task will run. This is the full image reference, as would be specified to - "docker pull". If no tag is provided as part of the image name, the tag - ":latest" is used as a default. - :type image_name: str - :param registry: The private registry which contains the container image. - This setting can be omitted if was already provided at pool creation. - :type registry: ~azure.mgmt.batch.models.ContainerRegistry - """ - - _validation = { - 'image_name': {'required': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'etag': {'readonly': True}, + 'last_modified': {'readonly': True}, + 'creation_time': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'provisioning_state_transition_time': {'readonly': True}, + 'allocation_state': {'readonly': True}, + 'allocation_state_transition_time': {'readonly': True}, + 'current_dedicated_nodes': {'readonly': True}, + 'current_low_priority_nodes': {'readonly': True}, + 'auto_scale_run': {'readonly': True}, + 'resize_operation_status': {'readonly': True}, } _attribute_map = { - 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, - 'image_name': {'key': 'imageName', 'type': 'str'}, - 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'etag': {'key': 'etag', 'type': 'str'}, + 'display_name': {'key': 'properties.displayName', 'type': 'str'}, + 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, + 'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'PoolProvisioningState'}, + 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, + 'allocation_state': {'key': 'properties.allocationState', 'type': 'AllocationState'}, + 'allocation_state_transition_time': {'key': 'properties.allocationStateTransitionTime', 'type': 'iso-8601'}, + 'vm_size': {'key': 'properties.vmSize', 'type': 'str'}, + 'deployment_configuration': {'key': 'properties.deploymentConfiguration', 'type': 'DeploymentConfiguration'}, + 'current_dedicated_nodes': {'key': 'properties.currentDedicatedNodes', 'type': 'int'}, + 'current_low_priority_nodes': {'key': 'properties.currentLowPriorityNodes', 'type': 'int'}, + 'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'}, + 'auto_scale_run': {'key': 'properties.autoScaleRun', 'type': 'AutoScaleRun'}, + 'inter_node_communication': {'key': 'properties.interNodeCommunication', 'type': 'InterNodeCommunicationState'}, + 'network_configuration': {'key': 'properties.networkConfiguration', 'type': 'NetworkConfiguration'}, + 'max_tasks_per_node': {'key': 'properties.maxTasksPerNode', 'type': 'int'}, + 'task_scheduling_policy': {'key': 'properties.taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, + 'user_accounts': {'key': 'properties.userAccounts', 'type': '[UserAccount]'}, + 'metadata': {'key': 'properties.metadata', 'type': '[MetadataItem]'}, + 'start_task': {'key': 'properties.startTask', 'type': 'StartTask'}, + 'certificates': {'key': 'properties.certificates', 'type': '[CertificateReference]'}, + 'application_packages': {'key': 'properties.applicationPackages', 'type': '[ApplicationPackageReference]'}, + 'application_licenses': {'key': 
'properties.applicationLicenses', 'type': '[str]'}, + 'resize_operation_status': {'key': 'properties.resizeOperationStatus', 'type': 'ResizeOperationStatus'}, } - def __init__(self, image_name, container_run_options=None, registry=None): - super(TaskContainerSettings, self).__init__() - self.container_run_options = container_run_options - self.image_name = image_name - self.registry = registry + def __init__(self, *, display_name: str=None, vm_size: str=None, deployment_configuration=None, scale_settings=None, inter_node_communication=None, network_configuration=None, max_tasks_per_node: int=None, task_scheduling_policy=None, user_accounts=None, metadata=None, start_task=None, certificates=None, application_packages=None, application_licenses=None, **kwargs) -> None: + super(Pool, self).__init__(**kwargs) + self.display_name = display_name + self.last_modified = None + self.creation_time = None + self.provisioning_state = None + self.provisioning_state_transition_time = None + self.allocation_state = None + self.allocation_state_transition_time = None + self.vm_size = vm_size + self.deployment_configuration = deployment_configuration + self.current_dedicated_nodes = None + self.current_low_priority_nodes = None + self.scale_settings = scale_settings + self.auto_scale_run = None + self.inter_node_communication = inter_node_communication + self.network_configuration = network_configuration + self.max_tasks_per_node = max_tasks_per_node + self.task_scheduling_policy = task_scheduling_policy + self.user_accounts = user_accounts + self.metadata = metadata + self.start_task = start_task + self.certificates = certificates + self.application_packages = application_packages + self.application_licenses = application_licenses + self.resize_operation_status = None -class TaskSchedulingPolicy(Model): - """Specifies how tasks should be distributed across compute nodes. +class PoolEndpointConfiguration(Model): + """The endpoint configuration for a pool. - :param node_fill_type: How tasks should be distributed across compute - nodes. Possible values include: 'Spread', 'Pack' - :type node_fill_type: str or ~azure.mgmt.batch.models.ComputeNodeFillType + All required parameters must be populated in order to send to Azure. + + :param inbound_nat_pools: Required. A list of inbound NAT pools that can + be used to address specific ports on an individual compute node + externally. The maximum number of inbound NAT pools per Batch pool is 5. + If the maximum number of inbound NAT pools is exceeded the request fails + with HTTP status code 400. + :type inbound_nat_pools: list[~azure.mgmt.batch.models.InboundNatPool] """ _validation = { - 'node_fill_type': {'required': True}, + 'inbound_nat_pools': {'required': True}, } _attribute_map = { - 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, + 'inbound_nat_pools': {'key': 'inboundNatPools', 'type': '[InboundNatPool]'}, } - def __init__(self, node_fill_type): - super(TaskSchedulingPolicy, self).__init__() - self.node_fill_type = node_fill_type + def __init__(self, *, inbound_nat_pools, **kwargs) -> None: + super(PoolEndpointConfiguration, self).__init__(**kwargs) + self.inbound_nat_pools = inbound_nat_pools -class UserAccount(Model): - """Properties used to create a user on an Azure Batch node. +class ResizeError(Model): + """An error that occurred when resizing a pool. - :param name: The name of the user account. - :type name: str - :param password: The password for the user account. 
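The new Pool model gathers all of the pieces above through flattened properties.* keys. A self-contained sketch of a small Cloud Services pool (names and sizes are illustrative; os_family '6' is the newly added Windows Server 2019 option); the resulting object would then be handed to the client's pool operations group, presumably via something like client.pool.create(resource_group, account_name, 'nightly-render', pool):

from datetime import timedelta

from azure.mgmt.batch import models

pool = models.Pool(
    display_name='nightly-render',
    vm_size='Standard_D2_v2',
    deployment_configuration=models.DeploymentConfiguration(
        cloud_service_configuration=models.CloudServiceConfiguration(os_family='6')),
    scale_settings=models.ScaleSettings(
        fixed_scale=models.FixedScaleSettings(
            target_dedicated_nodes=2,
            resize_timeout=timedelta(minutes=15))),
    start_task=models.StartTask(
        command_line='cmd /c "echo pool ready"',
        wait_for_success=True),
    metadata=[models.MetadataItem(name='team', value='render-farm')],
    max_tasks_per_node=2)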
- :type password: str - :param elevation_level: The elevation level of the user account. nonAdmin - - The auto user is a standard user without elevated access. admin - The - auto user is a user with elevated access and operates with full - Administrator permissions. The default value is nonAdmin. Possible values - include: 'NonAdmin', 'Admin' - :type elevation_level: str or ~azure.mgmt.batch.models.ElevationLevel - :param linux_user_configuration: The Linux-specific user configuration for - the user account. This property is ignored if specified on a Windows pool. - If not specified, the user is created with the default options. - :type linux_user_configuration: - ~azure.mgmt.batch.models.LinuxUserConfiguration - :param windows_user_configuration: The Windows-specific user configuration - for the user account. This property can only be specified if the user is - on a Windows pool. If not specified and on a Windows pool, the user is - created with the default options. - :type windows_user_configuration: - ~azure.mgmt.batch.models.WindowsUserConfiguration + All required parameters must be populated in order to send to Azure. + + :param code: Required. An identifier for the error. Codes are invariant + and are intended to be consumed programmatically. + :type code: str + :param message: Required. A message describing the error, intended to be + suitable for display in a user interface. + :type message: str + :param details: Additional details about the error. + :type details: list[~azure.mgmt.batch.models.ResizeError] """ _validation = { - 'name': {'required': True}, - 'password': {'required': True}, + 'code': {'required': True}, + 'message': {'required': True}, } _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'password': {'key': 'password', 'type': 'str'}, - 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, - 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, - 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[ResizeError]'}, } - def __init__(self, name, password, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None): - super(UserAccount, self).__init__() - self.name = name - self.password = password - self.elevation_level = elevation_level - self.linux_user_configuration = linux_user_configuration - self.windows_user_configuration = windows_user_configuration + def __init__(self, *, code: str, message: str, details=None, **kwargs) -> None: + super(ResizeError, self).__init__(**kwargs) + self.code = code + self.message = message + self.details = details -class UserIdentity(Model): - """The definition of the user identity under which the task is run. +class ResizeOperationStatus(Model): + """Details about the current or last completed resize operation. - Specify either the userName or autoUser property, but not both. + Describes either the current operation (if the pool AllocationState is + Resizing) or the previously completed operation (if the AllocationState is + Steady). - :param user_name: The name of the user identity under which the task is - run. The userName and autoUser properties are mutually exclusive; you must - specify one but not both. - :type user_name: str - :param auto_user: The auto user under which the task is run. 
The userName - and autoUser properties are mutually exclusive; you must specify one but - not both. - :type auto_user: ~azure.mgmt.batch.models.AutoUserSpecification + :param target_dedicated_nodes: The desired number of dedicated compute + nodes in the pool. + :type target_dedicated_nodes: int + :param target_low_priority_nodes: The desired number of low-priority + compute nodes in the pool. + :type target_low_priority_nodes: int + :param resize_timeout: The timeout for allocation of compute nodes to the + pool or removal of compute nodes from the pool. The default value is 15 + minutes. The minimum value is 5 minutes. If you specify a value less than + 5 minutes, the Batch service returns an error; if you are calling the REST + API directly, the HTTP status code is 400 (Bad Request). + :type resize_timeout: timedelta + :param node_deallocation_option: Determines what to do with a node and its + running task(s) if the pool size is decreasing. The default value is + requeue. Possible values include: 'Requeue', 'Terminate', + 'TaskCompletion', 'RetainedData' + :type node_deallocation_option: str or + ~azure.mgmt.batch.models.ComputeNodeDeallocationOption + :param start_time: The time when this resize operation was started. + :type start_time: datetime + :param errors: Details of any errors encountered while performing the last + resize on the pool. This property is set only if an error occurred during + the last pool resize, and only when the pool allocationState is Steady. + :type errors: list[~azure.mgmt.batch.models.ResizeError] """ _attribute_map = { - 'user_name': {'key': 'userName', 'type': 'str'}, - 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, + 'target_dedicated_nodes': {'key': 'targetDedicatedNodes', 'type': 'int'}, + 'target_low_priority_nodes': {'key': 'targetLowPriorityNodes', 'type': 'int'}, + 'resize_timeout': {'key': 'resizeTimeout', 'type': 'duration'}, + 'node_deallocation_option': {'key': 'nodeDeallocationOption', 'type': 'ComputeNodeDeallocationOption'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'errors': {'key': 'errors', 'type': '[ResizeError]'}, } - def __init__(self, user_name=None, auto_user=None): - super(UserIdentity, self).__init__() - self.user_name = user_name - self.auto_user = auto_user + def __init__(self, *, target_dedicated_nodes: int=None, target_low_priority_nodes: int=None, resize_timeout=None, node_deallocation_option=None, start_time=None, errors=None, **kwargs) -> None: + super(ResizeOperationStatus, self).__init__(**kwargs) + self.target_dedicated_nodes = target_dedicated_nodes + self.target_low_priority_nodes = target_low_priority_nodes + self.resize_timeout = resize_timeout + self.node_deallocation_option = node_deallocation_option + self.start_time = start_time + self.errors = errors -class VirtualMachineConfiguration(Model): - """The configuration for compute nodes in a pool based on the Azure Virtual - Machines infrastructure. +class ResourceFile(Model): + """A single file or multiple files to be downloaded to a compute node. - :param image_reference: A reference to the Azure Virtual Machines - Marketplace Image or the custom Virtual Machine Image to use. - :type image_reference: ~azure.mgmt.batch.models.ImageReference - :param node_agent_sku_id: The SKU of the Batch node agent to be - provisioned on compute nodes in the pool. The Batch node agent is a - program that runs on each node in the pool, and provides the - command-and-control interface between the node and the Batch service. 
- There are different implementations of the node agent, known as SKUs, for - different operating systems. You must specify a node agent SKU which - matches the selected image reference. To get the list of supported node - agent SKUs along with their list of verified image references, see the - 'List supported node agent SKUs' operation. - :type node_agent_sku_id: str - :param windows_configuration: Windows operating system settings on the - virtual machine. This property must not be specified if the imageReference - specifies a Linux OS image. - :type windows_configuration: ~azure.mgmt.batch.models.WindowsConfiguration - :param data_disks: The configuration for data disks attached to the - compute nodes in the pool. This property must be specified if the compute - nodes in the pool need to have empty data disks attached to them. - :type data_disks: list[~azure.mgmt.batch.models.DataDisk] - :param license_type: The type of on-premises license to be used when - deploying the operating system. This only applies to images that contain - the Windows operating system, and should only be used when you hold valid - on-premises licenses for the nodes which will be deployed. If omitted, no - on-premises licensing discount is applied. Values are: - Windows_Server - The on-premises license is for Windows Server. - Windows_Client - The on-premises license is for Windows Client. - :type license_type: str - :param container_configuration: The container configuration for the pool. - If specified, setup is performed on each node in the pool to allow tasks - to run in containers. All regular tasks and job manager tasks run on this - pool must specify the containerSettings property, and all other tasks may - specify it. - :type container_configuration: - ~azure.mgmt.batch.models.ContainerConfiguration + :param auto_storage_container_name: The storage container name in the auto + storage account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading the blob. There are two ways to get such a URL for a blob in + Azure storage: include a Shared Access Signature (SAS) granting read and + list permissions on the blob, or set the ACL for the blob or its container + to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL is Azure + Blob Storage, it must be readable using anonymous access; that is, the + Batch service does not present any credentials when downloading the blob. + There are two ways to get such a URL for a blob in Azure storage: include + a Shared Access Signature (SAS) granting read permissions on the blob, or + set the ACL for the blob or its container to allow public access. + :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. 
The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the compute node to which to download + the file, relative to the task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the task's working directory (for example by + using '..'). + :type file_path: str + :param file_mode: The file permission mode attribute in octal format. This + property applies only to files being downloaded to Linux compute nodes. It + will be ignored if it is specified for a resourceFile which will be + downloaded to a Windows node. If this property is not specified for a + Linux node, then a default value of 0770 is applied to the file. + :type file_mode: str """ - _validation = { - 'image_reference': {'required': True}, - 'node_agent_sku_id': {'required': True}, - } - _attribute_map = { - 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'node_agent_sku_id': {'key': 'nodeAgentSkuId', 'type': 'str'}, - 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, - 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, - 'license_type': {'key': 'licenseType', 'type': 'str'}, - 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, + 'file_path': {'key': 'filePath', 'type': 'str'}, + 'file_mode': {'key': 'fileMode', 'type': 'str'}, } - def __init__(self, image_reference, node_agent_sku_id, windows_configuration=None, data_disks=None, license_type=None, container_configuration=None): - super(VirtualMachineConfiguration, self).__init__() - self.image_reference = image_reference - self.node_agent_sku_id = node_agent_sku_id - self.windows_configuration = windows_configuration - self.data_disks = data_disks - self.license_type = license_type - self.container_configuration = container_configuration + def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None: + super(ResourceFile, self).__init__(**kwargs) + self.auto_storage_container_name = auto_storage_container_name + self.storage_container_url = storage_container_url + self.http_url = http_url + self.blob_prefix = blob_prefix + self.file_path = file_path + self.file_mode = file_mode -class WindowsConfiguration(Model): - """Windows operating system settings to apply to the virtual machine. +class ScaleSettings(Model): + """Scale settings for the pool. 
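As a sketch of the mutually exclusive source properties documented for ResourceFile above (not part of the diff; the URL and container names are placeholders):

from azure.mgmt.batch import models

# Exactly one of autoStorageContainerName, storageContainerUrl or httpUrl
# must be set; both sources below use placeholder names.
single_file = models.ResourceFile(
    http_url='https://example.blob.core.windows.net/data/input.txt',  # must be public or carry a SAS
    file_path='input/input.txt',   # required with httpUrl, includes the filename
)
whole_container = models.ResourceFile(
    auto_storage_container_name='task-data',
    blob_prefix='run42/',          # only valid with a container source
    file_path='data',              # treated as a download directory here
    file_mode='0644',              # applies to Linux compute nodes only
)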
- :param enable_automatic_updates: Whether automatic updates are enabled on - the virtual machine. If omitted, the default value is true. - :type enable_automatic_updates: bool + Defines the desired size of the pool. This can either be 'fixedScale' where + the requested targetDedicatedNodes is specified, or 'autoScale' which + defines a formula which is periodically reevaluated. If this property is + not specified, the pool will have a fixed scale with 0 + targetDedicatedNodes. + + :param fixed_scale: Fixed scale settings for the pool. This property and + autoScale are mutually exclusive and one of the properties must be + specified. + :type fixed_scale: ~azure.mgmt.batch.models.FixedScaleSettings + :param auto_scale: AutoScale settings for the pool. This property and + fixedScale are mutually exclusive and one of the properties must be + specified. + :type auto_scale: ~azure.mgmt.batch.models.AutoScaleSettings """ _attribute_map = { - 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, + 'fixed_scale': {'key': 'fixedScale', 'type': 'FixedScaleSettings'}, + 'auto_scale': {'key': 'autoScale', 'type': 'AutoScaleSettings'}, } - def __init__(self, enable_automatic_updates=None): - super(WindowsConfiguration, self).__init__() - self.enable_automatic_updates = enable_automatic_updates + def __init__(self, *, fixed_scale=None, auto_scale=None, **kwargs) -> None: + super(ScaleSettings, self).__init__(**kwargs) + self.fixed_scale = fixed_scale + self.auto_scale = auto_scale -class WindowsUserConfiguration(Model): - """Properties used to create a user account on a Windows node. +class StartTask(Model): + """A task which is run when a compute node joins a pool in the Azure Batch + service, or when the compute node is rebooted or reimaged. - :param login_mode: Login mode for user. Specifies login mode for the user. - The default value for VirtualMachineConfiguration pools is interactive - mode and for CloudServiceConfiguration pools is batch mode. Possible - values include: 'Batch', 'Interactive' - :type login_mode: str or ~azure.mgmt.batch.models.LoginMode + In some cases the start task may be re-run even though the node was not + rebooted. Due to this, start tasks should be idempotent and exit gracefully + if the setup they're performing has already been done. Special care should + be taken to avoid start tasks which create breakaway process or + install/launch services from the start task working directory, as this will + block Batch from being able to re-run the start task. + + :param command_line: The command line of the start task. The command line + does not run under a shell, and therefore cannot take advantage of shell + features such as environment variable expansion. If you want to take + advantage of such features, you should invoke the shell in the command + line, for example using "cmd /c MyCommand" in Windows or "/bin/sh -c + MyCommand" in Linux. Required if any other properties of the startTask are + specified. + :type command_line: str + :param resource_files: A list of files that the Batch service will + download to the compute node before running the command line. + :type resource_files: list[~azure.mgmt.batch.models.ResourceFile] + :param environment_settings: A list of environment variable settings for + the start task. + :type environment_settings: + list[~azure.mgmt.batch.models.EnvironmentSetting] + :param user_identity: The user identity under which the start task runs. + If omitted, the task runs as a non-administrative user unique to the task. 
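A sketch of the two mutually exclusive ScaleSettings shapes described above (not part of the diff; FixedScaleSettings and AutoScaleSettings are assumed from the same models module, and the autoscale formula is a trivial placeholder):

from datetime import timedelta
from azure.mgmt.batch import models

# Fixed scale: the pool holds a requested number of nodes.
fixed = models.ScaleSettings(
    fixed_scale=models.FixedScaleSettings(
        target_dedicated_nodes=4,
        target_low_priority_nodes=0,
    ),
)

# Autoscale: the formula is re-evaluated periodically by the Batch service.
auto = models.ScaleSettings(
    auto_scale=models.AutoScaleSettings(
        formula='$TargetDedicatedNodes = 2;',       # placeholder formula
        evaluation_interval=timedelta(minutes=15),  # serialized as an ISO-8601 duration
    ),
)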
+ :type user_identity: ~azure.mgmt.batch.models.UserIdentity + :param max_task_retry_count: The maximum number of times the task may be + retried. The Batch service retries a task if its exit code is nonzero. + Note that this value specifically controls the number of retries. The + Batch service will try the task once, and may then retry up to this limit. + For example, if the maximum retry count is 3, Batch tries the task up to 4 + times (one initial try and 3 retries). If the maximum retry count is 0, + the Batch service does not retry the task. If the maximum retry count is + -1, the Batch service retries the task without limit. + :type max_task_retry_count: int + :param wait_for_success: Whether the Batch service should wait for the + start task to complete successfully (that is, to exit with exit code 0) + before scheduling any tasks on the compute node. If true and the start + task fails on a compute node, the Batch service retries the start task up + to its maximum retry count (maxTaskRetryCount). If the task has still not + completed successfully after all retries, then the Batch service marks the + compute node unusable, and will not schedule tasks to it. This condition + can be detected via the node state and scheduling error detail. If false, + the Batch service will not wait for the start task to complete. In this + case, other tasks can start executing on the compute node while the start + task is still running; and even if the start task fails, new tasks will + continue to be scheduled on the node. The default is false. + :type wait_for_success: bool + :param container_settings: The settings for the container under which the + start task runs. When this is specified, all directories recursively below + the AZ_BATCH_NODE_ROOT_DIR (the root of Azure Batch directories on the + node) are mapped into the container, all task environment variables are + mapped into the container, and the task command line is executed in the + container. + :type container_settings: ~azure.mgmt.batch.models.TaskContainerSettings """ _attribute_map = { - 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + 'command_line': {'key': 'commandLine', 'type': 'str'}, + 'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'}, + 'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'}, + 'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'}, + 'max_task_retry_count': {'key': 'maxTaskRetryCount', 'type': 'int'}, + 'wait_for_success': {'key': 'waitForSuccess', 'type': 'bool'}, + 'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'}, } - def __init__(self, login_mode=None): - super(WindowsUserConfiguration, self).__init__() - self.login_mode = login_mode + def __init__(self, *, command_line: str=None, resource_files=None, environment_settings=None, user_identity=None, max_task_retry_count: int=None, wait_for_success: bool=None, container_settings=None, **kwargs) -> None: + super(StartTask, self).__init__(**kwargs) + self.command_line = command_line + self.resource_files = resource_files + self.environment_settings = environment_settings + self.user_identity = user_identity + self.max_task_retry_count = max_task_retry_count + self.wait_for_success = wait_for_success + self.container_settings = container_settings -class Application(ProxyResource): - """Contains information about an application in a Batch account. +class TaskContainerSettings(Model): + """The container settings for a task. 
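A sketch of a start task built from the properties above (not part of the diff; the command line is a placeholder, and UserIdentity/AutoUserSpecification are the models from this same module):

from azure.mgmt.batch import models

start_task = models.StartTask(
    command_line='/bin/sh -c "apt-get -y update"',  # placeholder setup command
    user_identity=models.UserIdentity(
        auto_user=models.AutoUserSpecification(scope='Pool', elevation_level='Admin'),
    ),
    max_task_retry_count=1,    # one initial try plus one retry
    wait_for_success=True,     # block task scheduling until setup succeeds
)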
- Variables are only populated by the server, and will be ignored when - sending a request. + All required parameters must be populated in order to send to Azure. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param display_name: The display name for the application. - :type display_name: str - :param allow_updates: A value indicating whether packages within the - application may be overwritten using the same version string. - :type allow_updates: bool - :param default_version: The package to use if a client requests the - application but does not specify a version. This property can only be set - to the name of an existing package. - :type default_version: str + :param container_run_options: Additional options to the container create + command. These additional options are supplied as arguments to the "docker + create" command, in addition to those controlled by the Batch Service. + :type container_run_options: str + :param image_name: Required. The image to use to create the container in + which the task will run. This is the full image reference, as would be + specified to "docker pull". If no tag is provided as part of the image + name, the tag ":latest" is used as a default. + :type image_name: str + :param registry: The private registry which contains the container image. + This setting can be omitted if was already provided at pool creation. + :type registry: ~azure.mgmt.batch.models.ContainerRegistry """ _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, + 'image_name': {'required': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'display_name': {'key': 'properties.displayName', 'type': 'str'}, - 'allow_updates': {'key': 'properties.allowUpdates', 'type': 'bool'}, - 'default_version': {'key': 'properties.defaultVersion', 'type': 'str'}, + 'container_run_options': {'key': 'containerRunOptions', 'type': 'str'}, + 'image_name': {'key': 'imageName', 'type': 'str'}, + 'registry': {'key': 'registry', 'type': 'ContainerRegistry'}, } - def __init__(self, display_name=None, allow_updates=None, default_version=None): - super(Application, self).__init__() - self.display_name = display_name - self.allow_updates = allow_updates - self.default_version = default_version + def __init__(self, *, image_name: str, container_run_options: str=None, registry=None, **kwargs) -> None: + super(TaskContainerSettings, self).__init__(**kwargs) + self.container_run_options = container_run_options + self.image_name = image_name + self.registry = registry -class ApplicationPackage(ProxyResource): - """An application package which represents a particular version of an - application. +class TaskSchedulingPolicy(Model): + """Specifies how tasks should be distributed across compute nodes. - Variables are only populated by the server, and will be ignored when - sending a request. + All required parameters must be populated in order to send to Azure. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. 
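A sketch pairing the new required-keyword TaskContainerSettings and TaskSchedulingPolicy constructors above (not part of the diff; the image name and run options are placeholders, and the registry argument is omitted since it is only needed for private images):

from azure.mgmt.batch import models

container_settings = models.TaskContainerSettings(
    image_name='ubuntu:18.04',                    # pulled as "docker pull ubuntu:18.04"
    container_run_options='--rm --workdir /tmp',  # extra "docker create" arguments
)
scheduling_policy = models.TaskSchedulingPolicy(node_fill_type='Pack')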
- :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :ivar state: The current state of the application package. Possible values - include: 'Pending', 'Active' - :vartype state: str or ~azure.mgmt.batch.models.PackageState - :ivar format: The format of the application package, if the package is - active. - :vartype format: str - :ivar storage_url: The URL for the application package in Azure Storage. - :vartype storage_url: str - :ivar storage_url_expiry: The UTC time at which the Azure Storage URL will - expire. - :vartype storage_url_expiry: datetime - :ivar last_activation_time: The time at which the package was last - activated, if the package is active. - :vartype last_activation_time: datetime + :param node_fill_type: Required. How tasks should be distributed across + compute nodes. Possible values include: 'Spread', 'Pack' + :type node_fill_type: str or ~azure.mgmt.batch.models.ComputeNodeFillType """ _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'state': {'readonly': True}, - 'format': {'readonly': True}, - 'storage_url': {'readonly': True}, - 'storage_url_expiry': {'readonly': True}, - 'last_activation_time': {'readonly': True}, + 'node_fill_type': {'required': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'state': {'key': 'properties.state', 'type': 'PackageState'}, - 'format': {'key': 'properties.format', 'type': 'str'}, - 'storage_url': {'key': 'properties.storageUrl', 'type': 'str'}, - 'storage_url_expiry': {'key': 'properties.storageUrlExpiry', 'type': 'iso-8601'}, - 'last_activation_time': {'key': 'properties.lastActivationTime', 'type': 'iso-8601'}, + 'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'}, } - def __init__(self): - super(ApplicationPackage, self).__init__() - self.state = None - self.format = None - self.storage_url = None - self.storage_url_expiry = None - self.last_activation_time = None + def __init__(self, *, node_fill_type, **kwargs) -> None: + super(TaskSchedulingPolicy, self).__init__(**kwargs) + self.node_fill_type = node_fill_type -class AutoStorageProperties(AutoStorageBaseProperties): - """Contains information about the auto-storage account associated with a Batch - account. +class UserAccount(Model): + """Properties used to create a user on an Azure Batch node. + + All required parameters must be populated in order to send to Azure. - :param storage_account_id: The resource ID of the storage account to be - used for auto-storage account. - :type storage_account_id: str - :param last_key_sync: The UTC time at which storage keys were last - synchronized with the Batch account. - :type last_key_sync: datetime + :param name: Required. The name of the user account. + :type name: str + :param password: Required. The password for the user account. + :type password: str + :param elevation_level: The elevation level of the user account. nonAdmin + - The auto user is a standard user without elevated access. admin - The + auto user is a user with elevated access and operates with full + Administrator permissions. The default value is nonAdmin. 
Possible values + include: 'NonAdmin', 'Admin' + :type elevation_level: str or ~azure.mgmt.batch.models.ElevationLevel + :param linux_user_configuration: The Linux-specific user configuration for + the user account. This property is ignored if specified on a Windows pool. + If not specified, the user is created with the default options. + :type linux_user_configuration: + ~azure.mgmt.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user account. This property can only be specified if the user is + on a Windows pool. If not specified and on a Windows pool, the user is + created with the default options. + :type windows_user_configuration: + ~azure.mgmt.batch.models.WindowsUserConfiguration """ _validation = { - 'storage_account_id': {'required': True}, - 'last_key_sync': {'required': True}, + 'name': {'required': True}, + 'password': {'required': True}, } _attribute_map = { - 'storage_account_id': {'key': 'storageAccountId', 'type': 'str'}, - 'last_key_sync': {'key': 'lastKeySync', 'type': 'iso-8601'}, + 'name': {'key': 'name', 'type': 'str'}, + 'password': {'key': 'password', 'type': 'str'}, + 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, + 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, } - def __init__(self, storage_account_id, last_key_sync): - super(AutoStorageProperties, self).__init__(storage_account_id=storage_account_id) - self.last_key_sync = last_key_sync + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: + super(UserAccount, self).__init__(**kwargs) + self.name = name + self.password = password + self.elevation_level = elevation_level + self.linux_user_configuration = linux_user_configuration + self.windows_user_configuration = windows_user_configuration -class BatchAccount(Resource): - """Contains information about an Azure Batch account. +class UserIdentity(Model): + """The definition of the user identity under which the task is run. - Variables are only populated by the server, and will be ignored when - sending a request. + Specify either the userName or autoUser property, but not both. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar location: The location of the resource. - :vartype location: str - :ivar tags: The tags of the resource. - :vartype tags: dict[str, str] - :ivar account_endpoint: The account endpoint used to interact with the - Batch service. - :vartype account_endpoint: str - :ivar provisioning_state: The provisioned state of the resource. Possible - values include: 'Invalid', 'Creating', 'Deleting', 'Succeeded', 'Failed', - 'Cancelled' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.ProvisioningState - :ivar pool_allocation_mode: The allocation mode to use for creating pools - in the Batch account. Possible values include: 'BatchService', - 'UserSubscription' - :vartype pool_allocation_mode: str or - ~azure.mgmt.batch.models.PoolAllocationMode - :ivar key_vault_reference: A reference to the Azure key vault associated - with the Batch account. 
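A sketch of a pool-level user account as described above (not part of the diff; the password is a placeholder to be replaced with a real secret, and LinuxUserConfiguration is assumed from the same models module and only applies on Linux pools):

from azure.mgmt.batch import models

admin_user = models.UserAccount(
    name='pooladmin',
    password='<placeholder-password>',   # supply a real secret at deployment time
    elevation_level='Admin',             # or 'NonAdmin' (the default)
    linux_user_configuration=models.LinuxUserConfiguration(uid=1001, gid=1001),
)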
- :vartype key_vault_reference: ~azure.mgmt.batch.models.KeyVaultReference - :ivar auto_storage: The properties and status of any auto-storage account - associated with the Batch account. - :vartype auto_storage: ~azure.mgmt.batch.models.AutoStorageProperties - :ivar dedicated_core_quota: The dedicated core quota for this Batch - account. - :vartype dedicated_core_quota: int - :ivar low_priority_core_quota: The low-priority core quota for this Batch - account. - :vartype low_priority_core_quota: int - :ivar pool_quota: The pool quota for this Batch account. - :vartype pool_quota: int - :ivar active_job_and_job_schedule_quota: The active job and job schedule - quota for this Batch account. - :vartype active_job_and_job_schedule_quota: int + :param user_name: The name of the user identity under which the task is + run. The userName and autoUser properties are mutually exclusive; you must + specify one but not both. + :type user_name: str + :param auto_user: The auto user under which the task is run. The userName + and autoUser properties are mutually exclusive; you must specify one but + not both. + :type auto_user: ~azure.mgmt.batch.models.AutoUserSpecification """ - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'location': {'readonly': True}, - 'tags': {'readonly': True}, - 'account_endpoint': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'pool_allocation_mode': {'readonly': True}, - 'key_vault_reference': {'readonly': True}, - 'auto_storage': {'readonly': True}, - 'dedicated_core_quota': {'readonly': True}, - 'low_priority_core_quota': {'readonly': True}, - 'pool_quota': {'readonly': True}, - 'active_job_and_job_schedule_quota': {'readonly': True}, - } - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'account_endpoint': {'key': 'properties.accountEndpoint', 'type': 'str'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'}, - 'pool_allocation_mode': {'key': 'properties.poolAllocationMode', 'type': 'PoolAllocationMode'}, - 'key_vault_reference': {'key': 'properties.keyVaultReference', 'type': 'KeyVaultReference'}, - 'auto_storage': {'key': 'properties.autoStorage', 'type': 'AutoStorageProperties'}, - 'dedicated_core_quota': {'key': 'properties.dedicatedCoreQuota', 'type': 'int'}, - 'low_priority_core_quota': {'key': 'properties.lowPriorityCoreQuota', 'type': 'int'}, - 'pool_quota': {'key': 'properties.poolQuota', 'type': 'int'}, - 'active_job_and_job_schedule_quota': {'key': 'properties.activeJobAndJobScheduleQuota', 'type': 'int'}, + 'user_name': {'key': 'userName', 'type': 'str'}, + 'auto_user': {'key': 'autoUser', 'type': 'AutoUserSpecification'}, } - def __init__(self): - super(BatchAccount, self).__init__() - self.account_endpoint = None - self.provisioning_state = None - self.pool_allocation_mode = None - self.key_vault_reference = None - self.auto_storage = None - self.dedicated_core_quota = None - self.low_priority_core_quota = None - self.pool_quota = None - self.active_job_and_job_schedule_quota = None + def __init__(self, *, user_name: str=None, auto_user=None, **kwargs) -> None: + super(UserIdentity, self).__init__(**kwargs) + self.user_name = user_name + self.auto_user = auto_user -class Certificate(ProxyResource): - """Contains information about a certificate. 
+class VirtualMachineConfiguration(Model): + """The configuration for compute nodes in a pool based on the Azure Virtual + Machines infrastructure. - Variables are only populated by the server, and will be ignored when - sending a request. + All required parameters must be populated in order to send to Azure. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param thumbprint_algorithm: The algorithm of the certificate thumbprint. - This must match the first portion of the certificate name. Currently - required to be 'SHA1'. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the certificate. This must match the - thumbprint from the name. - :type thumbprint: str - :param format: The format of the certificate - either Pfx or Cer. If - omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' - :type format: str or ~azure.mgmt.batch.models.CertificateFormat - :ivar provisioning_state: The provisioned state of the resource. Possible - values include: 'Succeeded', 'Deleting', 'Failed' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.CertificateProvisioningState - :ivar provisioning_state_transition_time: The time at which the - certificate entered its current state. - :vartype provisioning_state_transition_time: datetime - :ivar previous_provisioning_state: The previous provisioned state of the - resource. Possible values include: 'Succeeded', 'Deleting', 'Failed' - :vartype previous_provisioning_state: str or - ~azure.mgmt.batch.models.CertificateProvisioningState - :ivar previous_provisioning_state_transition_time: The time at which the - certificate entered its previous state. - :vartype previous_provisioning_state_transition_time: datetime - :ivar public_data: The public key of the certificate. - :vartype public_data: str - :ivar delete_certificate_error: The error which occurred while deleting - the certificate. This is only returned when the certificate - provisioningState is 'Failed'. - :vartype delete_certificate_error: - ~azure.mgmt.batch.models.DeleteCertificateError + :param image_reference: Required. A reference to the Azure Virtual + Machines Marketplace Image or the custom Virtual Machine Image to use. + :type image_reference: ~azure.mgmt.batch.models.ImageReference + :param node_agent_sku_id: Required. The SKU of the Batch node agent to be + provisioned on compute nodes in the pool. The Batch node agent is a + program that runs on each node in the pool, and provides the + command-and-control interface between the node and the Batch service. + There are different implementations of the node agent, known as SKUs, for + different operating systems. You must specify a node agent SKU which + matches the selected image reference. To get the list of supported node + agent SKUs along with their list of verified image references, see the + 'List supported node agent SKUs' operation. + :type node_agent_sku_id: str + :param windows_configuration: Windows operating system settings on the + virtual machine. This property must not be specified if the imageReference + specifies a Linux OS image. + :type windows_configuration: ~azure.mgmt.batch.models.WindowsConfiguration + :param data_disks: The configuration for data disks attached to the + compute nodes in the pool. 
This property must be specified if the compute + nodes in the pool need to have empty data disks attached to them. + :type data_disks: list[~azure.mgmt.batch.models.DataDisk] + :param license_type: The type of on-premises license to be used when + deploying the operating system. This only applies to images that contain + the Windows operating system, and should only be used when you hold valid + on-premises licenses for the nodes which will be deployed. If omitted, no + on-premises licensing discount is applied. Values are: + Windows_Server - The on-premises license is for Windows Server. + Windows_Client - The on-premises license is for Windows Client. + :type license_type: str + :param container_configuration: The container configuration for the pool. + If specified, setup is performed on each node in the pool to allow tasks + to run in containers. All regular tasks and job manager tasks run on this + pool must specify the containerSettings property, and all other tasks may + specify it. + :type container_configuration: + ~azure.mgmt.batch.models.ContainerConfiguration """ _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'provisioning_state_transition_time': {'readonly': True}, - 'previous_provisioning_state': {'readonly': True}, - 'previous_provisioning_state_transition_time': {'readonly': True}, - 'public_data': {'readonly': True}, - 'delete_certificate_error': {'readonly': True}, + 'image_reference': {'required': True}, + 'node_agent_sku_id': {'required': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, - 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, - 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'CertificateProvisioningState'}, - 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, - 'previous_provisioning_state': {'key': 'properties.previousProvisioningState', 'type': 'CertificateProvisioningState'}, - 'previous_provisioning_state_transition_time': {'key': 'properties.previousProvisioningStateTransitionTime', 'type': 'iso-8601'}, - 'public_data': {'key': 'properties.publicData', 'type': 'str'}, - 'delete_certificate_error': {'key': 'properties.deleteCertificateError', 'type': 'DeleteCertificateError'}, + 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, + 'node_agent_sku_id': {'key': 'nodeAgentSkuId', 'type': 'str'}, + 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, + 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, + 'license_type': {'key': 'licenseType', 'type': 'str'}, + 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, } - def __init__(self, thumbprint_algorithm=None, thumbprint=None, format=None): - super(Certificate, self).__init__() - self.thumbprint_algorithm = thumbprint_algorithm - self.thumbprint = thumbprint - self.format = format - self.provisioning_state = None - self.provisioning_state_transition_time = None - self.previous_provisioning_state = None - self.previous_provisioning_state_transition_time = None - self.public_data = 
None - self.delete_certificate_error = None + def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + super(VirtualMachineConfiguration, self).__init__(**kwargs) + self.image_reference = image_reference + self.node_agent_sku_id = node_agent_sku_id + self.windows_configuration = windows_configuration + self.data_disks = data_disks + self.license_type = license_type + self.container_configuration = container_configuration -class CertificateCreateOrUpdateParameters(ProxyResource): - """Contains information about a certificate. +class VirtualMachineFamilyCoreQuota(Model): + """A VM Family and its associated core quota for the Batch account. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. + :ivar name: The Virtual Machine family name. :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param thumbprint_algorithm: The algorithm of the certificate thumbprint. - This must match the first portion of the certificate name. Currently - required to be 'SHA1'. - :type thumbprint_algorithm: str - :param thumbprint: The thumbprint of the certificate. This must match the - thumbprint from the name. - :type thumbprint: str - :param format: The format of the certificate - either Pfx or Cer. If - omitted, the default is Pfx. Possible values include: 'Pfx', 'Cer' - :type format: str or ~azure.mgmt.batch.models.CertificateFormat - :param data: The base64-encoded contents of the certificate. The maximum - size is 10KB. - :type data: str - :param password: The password to access the certificate's private key. - This is required if the certificate format is pfx and must be omitted if - the certificate format is cer. - :type password: str + :ivar core_quota: The core quota for the VM family for the Batch account. + :vartype core_quota: int """ _validation = { - 'id': {'readonly': True}, 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'data': {'required': True}, + 'core_quota': {'readonly': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'thumbprint_algorithm': {'key': 'properties.thumbprintAlgorithm', 'type': 'str'}, - 'thumbprint': {'key': 'properties.thumbprint', 'type': 'str'}, - 'format': {'key': 'properties.format', 'type': 'CertificateFormat'}, - 'data': {'key': 'properties.data', 'type': 'str'}, - 'password': {'key': 'properties.password', 'type': 'str'}, + 'core_quota': {'key': 'coreQuota', 'type': 'int'}, } - def __init__(self, data, thumbprint_algorithm=None, thumbprint=None, format=None, password=None): - super(CertificateCreateOrUpdateParameters, self).__init__() - self.thumbprint_algorithm = thumbprint_algorithm - self.thumbprint = thumbprint - self.format = format - self.data = data - self.password = password - + def __init__(self, **kwargs) -> None: + super(VirtualMachineFamilyCoreQuota, self).__init__(**kwargs) + self.name = None + self.core_quota = None -class Pool(ProxyResource): - """Contains information about a pool. - Variables are only populated by the server, and will be ignored when - sending a request. 
+class WindowsConfiguration(Model): + """Windows operating system settings to apply to the virtual machine. - :ivar id: The ID of the resource. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. - :vartype type: str - :ivar etag: The ETag of the resource, used for concurrency statements. - :vartype etag: str - :param display_name: The display name for the pool. The display name need - not be unique and can contain any Unicode characters up to a maximum - length of 1024. - :type display_name: str - :ivar last_modified: The last modified time of the pool. This is the last - time at which the pool level data, such as the targetDedicatedNodes or - autoScaleSettings, changed. It does not factor in node-level changes such - as a compute node changing state. - :vartype last_modified: datetime - :ivar creation_time: The creation time of the pool. - :vartype creation_time: datetime - :ivar provisioning_state: The current state of the pool. Possible values - include: 'Succeeded', 'Deleting' - :vartype provisioning_state: str or - ~azure.mgmt.batch.models.PoolProvisioningState - :ivar provisioning_state_transition_time: The time at which the pool - entered its current state. - :vartype provisioning_state_transition_time: datetime - :ivar allocation_state: Whether the pool is resizing. Possible values - include: 'Steady', 'Resizing', 'Stopping' - :vartype allocation_state: str or ~azure.mgmt.batch.models.AllocationState - :ivar allocation_state_transition_time: The time at which the pool entered - its current allocation state. - :vartype allocation_state_transition_time: datetime - :param vm_size: The size of virtual machines in the pool. All VMs in a - pool are the same size. For information about available sizes of virtual - machines for Cloud Services pools (pools created with - cloudServiceConfiguration), see Sizes for Cloud Services - (http://azure.microsoft.com/documentation/articles/cloud-services-sizes-specs/). - Batch supports all Cloud Services VM sizes except ExtraSmall. For - information about available VM sizes for pools using images from the - Virtual Machines Marketplace (pools created with - virtualMachineConfiguration) see Sizes for Virtual Machines (Linux) - (https://azure.microsoft.com/documentation/articles/virtual-machines-linux-sizes/) - or Sizes for Virtual Machines (Windows) - (https://azure.microsoft.com/documentation/articles/virtual-machines-windows-sizes/). - Batch supports all Azure VM sizes except STANDARD_A0 and those with - premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). - :type vm_size: str - :param deployment_configuration: This property describes how the pool - nodes will be deployed - using Cloud Services or Virtual Machines. Using - CloudServiceConfiguration specifies that the nodes should be creating - using Azure Cloud Services (PaaS), while VirtualMachineConfiguration uses - Azure Virtual Machines (IaaS). - :type deployment_configuration: - ~azure.mgmt.batch.models.DeploymentConfiguration - :ivar current_dedicated_nodes: The number of compute nodes currently in - the pool. - :vartype current_dedicated_nodes: int - :ivar current_low_priority_nodes: The number of low priority compute nodes - currently in the pool. - :vartype current_low_priority_nodes: int - :param scale_settings: Settings which configure the number of nodes in the - pool. 
- :type scale_settings: ~azure.mgmt.batch.models.ScaleSettings - :ivar auto_scale_run: The results and errors from the last execution of - the autoscale formula. This property is set only if the pool automatically - scales, i.e. autoScaleSettings are used. - :vartype auto_scale_run: ~azure.mgmt.batch.models.AutoScaleRun - :param inter_node_communication: Whether the pool permits direct - communication between nodes. This imposes restrictions on which nodes can - be assigned to the pool. Enabling this value can reduce the chance of the - requested number of nodes to be allocated in the pool. If not specified, - this value defaults to 'Disabled'. Possible values include: 'Enabled', - 'Disabled' - :type inter_node_communication: str or - ~azure.mgmt.batch.models.InterNodeCommunicationState - :param network_configuration: The network configuration for the pool. - :type network_configuration: ~azure.mgmt.batch.models.NetworkConfiguration - :param max_tasks_per_node: The maximum number of tasks that can run - concurrently on a single compute node in the pool. - :type max_tasks_per_node: int - :param task_scheduling_policy: How tasks are distributed across compute - nodes in a pool. - :type task_scheduling_policy: - ~azure.mgmt.batch.models.TaskSchedulingPolicy - :param user_accounts: The list of user accounts to be created on each node - in the pool. - :type user_accounts: list[~azure.mgmt.batch.models.UserAccount] - :param metadata: A list of name-value pairs associated with the pool as - metadata. The Batch service does not assign any meaning to metadata; it is - solely for the use of user code. - :type metadata: list[~azure.mgmt.batch.models.MetadataItem] - :param start_task: A task specified to run on each compute node as it - joins the pool. In an PATCH (update) operation, this property can be set - to an empty object to remove the start task from the pool. - :type start_task: ~azure.mgmt.batch.models.StartTask - :param certificates: The list of certificates to be installed on each - compute node in the pool. For Windows compute nodes, the Batch service - installs the certificates to the specified certificate store and location. - For Linux compute nodes, the certificates are stored in a directory inside - the task working directory and an environment variable - AZ_BATCH_CERTIFICATES_DIR is supplied to the task to query for this - location. For certificates with visibility of 'remoteUser', a 'certs' - directory is created in the user's home directory (e.g., - /home/{user-name}/certs) and certificates are placed in that directory. - :type certificates: list[~azure.mgmt.batch.models.CertificateReference] - :param application_packages: The list of application packages to be - installed on each compute node in the pool. Changes to application - packages affect all new compute nodes joining the pool, but do not affect - compute nodes that are already in the pool until they are rebooted or - reimaged. - :type application_packages: - list[~azure.mgmt.batch.models.ApplicationPackageReference] - :param application_licenses: The list of application licenses the Batch - service will make available on each compute node in the pool. The list of - application licenses must be a subset of available Batch service - application licenses. If a license is requested which is not supported, - pool creation will fail. - :type application_licenses: list[str] - :ivar resize_operation_status: Contains details about the current or last - completed resize operation. 
- :vartype resize_operation_status: - ~azure.mgmt.batch.models.ResizeOperationStatus + :param enable_automatic_updates: Whether automatic updates are enabled on + the virtual machine. If omitted, the default value is true. + :type enable_automatic_updates: bool """ - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'etag': {'readonly': True}, - 'last_modified': {'readonly': True}, - 'creation_time': {'readonly': True}, - 'provisioning_state': {'readonly': True}, - 'provisioning_state_transition_time': {'readonly': True}, - 'allocation_state': {'readonly': True}, - 'allocation_state_transition_time': {'readonly': True}, - 'current_dedicated_nodes': {'readonly': True}, - 'current_low_priority_nodes': {'readonly': True}, - 'auto_scale_run': {'readonly': True}, - 'resize_operation_status': {'readonly': True}, + _attribute_map = { + 'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'}, } + def __init__(self, *, enable_automatic_updates: bool=None, **kwargs) -> None: + super(WindowsConfiguration, self).__init__(**kwargs) + self.enable_automatic_updates = enable_automatic_updates + + +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. + + :param login_mode: Login mode for user. Specifies login mode for the user. + The default value for VirtualMachineConfiguration pools is interactive + mode and for CloudServiceConfiguration pools is batch mode. Possible + values include: 'Batch', 'Interactive' + :type login_mode: str or ~azure.mgmt.batch.models.LoginMode + """ + _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, - 'display_name': {'key': 'properties.displayName', 'type': 'str'}, - 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, - 'creation_time': {'key': 'properties.creationTime', 'type': 'iso-8601'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'PoolProvisioningState'}, - 'provisioning_state_transition_time': {'key': 'properties.provisioningStateTransitionTime', 'type': 'iso-8601'}, - 'allocation_state': {'key': 'properties.allocationState', 'type': 'AllocationState'}, - 'allocation_state_transition_time': {'key': 'properties.allocationStateTransitionTime', 'type': 'iso-8601'}, - 'vm_size': {'key': 'properties.vmSize', 'type': 'str'}, - 'deployment_configuration': {'key': 'properties.deploymentConfiguration', 'type': 'DeploymentConfiguration'}, - 'current_dedicated_nodes': {'key': 'properties.currentDedicatedNodes', 'type': 'int'}, - 'current_low_priority_nodes': {'key': 'properties.currentLowPriorityNodes', 'type': 'int'}, - 'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'}, - 'auto_scale_run': {'key': 'properties.autoScaleRun', 'type': 'AutoScaleRun'}, - 'inter_node_communication': {'key': 'properties.interNodeCommunication', 'type': 'InterNodeCommunicationState'}, - 'network_configuration': {'key': 'properties.networkConfiguration', 'type': 'NetworkConfiguration'}, - 'max_tasks_per_node': {'key': 'properties.maxTasksPerNode', 'type': 'int'}, - 'task_scheduling_policy': {'key': 'properties.taskSchedulingPolicy', 'type': 'TaskSchedulingPolicy'}, - 'user_accounts': {'key': 'properties.userAccounts', 'type': '[UserAccount]'}, - 'metadata': {'key': 'properties.metadata', 'type': '[MetadataItem]'}, - 'start_task': {'key': 'properties.startTask', 'type': 
'StartTask'}, - 'certificates': {'key': 'properties.certificates', 'type': '[CertificateReference]'}, - 'application_packages': {'key': 'properties.applicationPackages', 'type': '[ApplicationPackageReference]'}, - 'application_licenses': {'key': 'properties.applicationLicenses', 'type': '[str]'}, - 'resize_operation_status': {'key': 'properties.resizeOperationStatus', 'type': 'ResizeOperationStatus'}, + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, } - def __init__(self, display_name=None, vm_size=None, deployment_configuration=None, scale_settings=None, inter_node_communication=None, network_configuration=None, max_tasks_per_node=None, task_scheduling_policy=None, user_accounts=None, metadata=None, start_task=None, certificates=None, application_packages=None, application_licenses=None): - super(Pool, self).__init__() - self.display_name = display_name - self.last_modified = None - self.creation_time = None - self.provisioning_state = None - self.provisioning_state_transition_time = None - self.allocation_state = None - self.allocation_state_transition_time = None - self.vm_size = vm_size - self.deployment_configuration = deployment_configuration - self.current_dedicated_nodes = None - self.current_low_priority_nodes = None - self.scale_settings = scale_settings - self.auto_scale_run = None - self.inter_node_communication = inter_node_communication - self.network_configuration = network_configuration - self.max_tasks_per_node = max_tasks_per_node - self.task_scheduling_policy = task_scheduling_policy - self.user_accounts = user_accounts - self.metadata = metadata - self.start_task = start_task - self.certificates = certificates - self.application_packages = application_packages - self.application_licenses = application_licenses - self.resize_operation_status = None + def __init__(self, *, login_mode=None, **kwargs) -> None: + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = login_mode diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_paged_models.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_paged_models.py index 1eb94dd020cd..1865025a41f0 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_paged_models.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/models/_paged_models.py @@ -25,8 +25,6 @@ class BatchAccountPaged(Paged): def __init__(self, *args, **kwargs): super(BatchAccountPaged, self).__init__(*args, **kwargs) - - class ApplicationPackagePaged(Paged): """ A paging container for iterating over a list of :class:`ApplicationPackage ` object @@ -40,8 +38,6 @@ class ApplicationPackagePaged(Paged): def __init__(self, *args, **kwargs): super(ApplicationPackagePaged, self).__init__(*args, **kwargs) - - class ApplicationPaged(Paged): """ A paging container for iterating over a list of :class:`Application ` object @@ -55,8 +51,6 @@ class ApplicationPaged(Paged): def __init__(self, *args, **kwargs): super(ApplicationPaged, self).__init__(*args, **kwargs) - - class OperationPaged(Paged): """ A paging container for iterating over a list of :class:`Operation ` object @@ -70,8 +64,6 @@ class OperationPaged(Paged): def __init__(self, *args, **kwargs): super(OperationPaged, self).__init__(*args, **kwargs) - - class CertificatePaged(Paged): """ A paging container for iterating over a list of :class:`Certificate ` object @@ -85,8 +77,6 @@ class CertificatePaged(Paged): def __init__(self, *args, **kwargs): super(CertificatePaged, self).__init__(*args, **kwargs) - - class PoolPaged(Paged): """ A paging container for 
iterating over a list of :class:`Pool ` object diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/__init__.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/__init__.py index 457cbd3470cd..818f748e2734 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/__init__.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/__init__.py @@ -9,13 +9,13 @@ # regenerated. # -------------------------------------------------------------------------- -from .batch_account_operations import BatchAccountOperations -from .application_package_operations import ApplicationPackageOperations -from .application_operations import ApplicationOperations -from .location_operations import LocationOperations -from .operations import Operations -from .certificate_operations import CertificateOperations -from .pool_operations import PoolOperations +from ._batch_account_operations import BatchAccountOperations +from ._application_package_operations import ApplicationPackageOperations +from ._application_operations import ApplicationOperations +from ._location_operations import LocationOperations +from ._operations import Operations +from ._certificate_operations import CertificateOperations +from ._pool_operations import PoolOperations __all__ = [ 'BatchAccountOperations', diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_operations.py similarity index 92% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_operations.py index b447de1a57b8..2ceacf73cd3c 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_operations.py @@ -19,11 +19,13 @@ class ApplicationOperations(object): """ApplicationOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". 
""" models = models @@ -33,7 +35,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -77,6 +79,7 @@ def create( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -92,9 +95,8 @@ def create( body_content = None # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -102,7 +104,6 @@ def create( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Application', response) @@ -150,7 +151,6 @@ def delete( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -159,8 +159,8 @@ def delete( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: exp = CloudError(response) @@ -210,7 +210,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -219,8 +219,8 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -228,7 +228,6 @@ def get( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Application', response) @@ -279,6 +278,7 @@ def update( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -291,9 +291,8 @@ def update( body_content = self._serialize.body(parameters, 'Application') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - 
request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -301,7 +300,6 @@ def update( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('Application', response) @@ -334,8 +332,7 @@ def list( ~azure.mgmt.batch.models.ApplicationPaged[~azure.mgmt.batch.models.Application] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] @@ -358,7 +355,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -367,9 +364,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -379,12 +380,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.ApplicationPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.ApplicationPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.ApplicationPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications'} diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_package_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_package_operations.py similarity index 92% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_package_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_package_operations.py index 0823f92e92f0..a3e792b0b3af 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/application_package_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_application_package_operations.py @@ -19,11 +19,13 @@ class ApplicationPackageOperations(object): """ApplicationPackageOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
- :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". """ models = models @@ -33,7 +35,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -82,6 +84,7 @@ def activate( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -94,9 +97,8 @@ def activate( body_content = self._serialize.body(parameters, 'ActivateApplicationPackageParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -104,7 +106,6 @@ def activate( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('ApplicationPackage', response) @@ -158,6 +159,7 @@ def create( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -173,9 +175,8 @@ def create( body_content = None # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -183,7 +184,6 @@ def create( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('ApplicationPackage', response) @@ -234,7 +234,6 @@ def delete( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -243,8 +242,8 @@ def delete( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 204]: exp = CloudError(response) @@ -297,7 +296,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if 
custom_headers: @@ -306,8 +305,8 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -315,7 +314,6 @@ def get( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('ApplicationPackage', response) @@ -351,8 +349,7 @@ def list( ~azure.mgmt.batch.models.ApplicationPackagePaged[~azure.mgmt.batch.models.ApplicationPackage] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] @@ -376,7 +373,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -385,9 +382,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -397,12 +398,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.ApplicationPackagePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.ApplicationPackagePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.ApplicationPackagePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationName}/versions'} diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/batch_account_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_batch_account_operations.py similarity index 84% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/batch_account_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_batch_account_operations.py index 70fc40e1b618..635b97d5e875 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/batch_account_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_batch_account_operations.py @@ -12,8 +12,8 @@ import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError -from msrest.exceptions 
import DeserializationError -from msrestazure.azure_operation import AzureOperationPoller +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling from .. import models @@ -21,11 +21,13 @@ class BatchAccountOperations(object): """BatchAccountOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". """ models = models @@ -35,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -57,6 +59,7 @@ def _create_initial( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -69,9 +72,8 @@ def _create_initial( body_content = self._serialize.body(parameters, 'BatchAccountCreateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202]: exp = CloudError(response) @@ -90,16 +92,13 @@ def _create_initial( if raw: client_raw_response = ClientRawResponse(deserialized, response) - try: - client_raw_response.add_headers(header_dict) - except DeserializationError: - pass # Deserialization of Headers here can fail + client_raw_response.add_headers(header_dict) return client_raw_response return deserialized def create( - self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, **operation_config): + self, resource_group_name, account_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config): """Creates a new Batch account with the specified parameters. Existing accounts cannot be updated with this API and should instead be updated with the Update Batch Account API. 
@@ -118,13 +117,16 @@ def create( :type parameters: ~azure.mgmt.batch.models.BatchAccountCreateParameters :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns BatchAccount - or ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns BatchAccount or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.BatchAccount] - or ~msrest.pipeline.ClientRawResponse + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.batch.models.BatchAccount]] :raises: :class:`CloudError` """ raw_result = self._create_initial( @@ -135,30 +137,8 @@ def create( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 202]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - header_dict = { 'Location': 'str', 'Retry-After': 'int', @@ -172,12 +152,13 @@ def get_long_running_output(response): return deserialized - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}'} def update( @@ -221,6 +202,7 @@ def update( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -233,9 +215,8 @@ def update( body_content = self._serialize.body(parameters, 'BatchAccountUpdateParameters') # Construct and send request - request = self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -243,7 +224,6 @@ def update( raise exp deserialized = None - if 
response.status_code == 200: deserialized = self._deserialize('BatchAccount', response) @@ -272,7 +252,6 @@ def _delete_initial( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -281,8 +260,8 @@ def _delete_initial( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) @@ -299,7 +278,7 @@ def _delete_initial( return client_raw_response def delete( - self, resource_group_name, account_name, custom_headers=None, raw=False, **operation_config): + self, resource_group_name, account_name, custom_headers=None, raw=False, polling=True, **operation_config): """Deletes the specified Batch account. :param resource_group_name: The name of the resource group that @@ -308,12 +287,14 @@ def delete( :param account_name: The name of the Batch account. :type account_name: str :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns None or - ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrest.pipeline.ClientRawResponse + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError` """ raw_result = self._delete_initial( @@ -323,30 +304,8 @@ def delete( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 202, 204]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - if raw: client_raw_response = ClientRawResponse(None, response) client_raw_response.add_headers({ @@ -355,12 +314,13 @@ def get_long_running_output(response): }) return client_raw_response - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is 
True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}'} def get( @@ -397,7 +357,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -406,8 +366,8 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -415,7 +375,6 @@ def get( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('BatchAccount', response) @@ -441,8 +400,7 @@ def list( ~azure.mgmt.batch.models.BatchAccountPaged[~azure.mgmt.batch.models.BatchAccount] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] @@ -461,7 +419,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -470,9 +428,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -482,12 +444,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Batch/batchAccounts'} @@ -510,8 +470,7 @@ def list_by_resource_group( ~azure.mgmt.batch.models.BatchAccountPaged[~azure.mgmt.batch.models.BatchAccount] :raises: :class:`CloudError` """ - def 
internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] @@ -531,7 +490,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -540,9 +499,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -552,12 +515,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.BatchAccountPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts'} @@ -596,7 +557,6 @@ def synchronize_auto_storage_keys( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -605,8 +565,8 @@ def synchronize_auto_storage_keys( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [204]: exp = CloudError(response) @@ -657,6 +617,7 @@ def regenerate_key( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -669,9 +630,8 @@ def regenerate_key( body_content = self._serialize.body(parameters, 'BatchAccountRegenerateKeyParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if 
response.status_code not in [200]: exp = CloudError(response) @@ -679,7 +639,6 @@ def regenerate_key( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('BatchAccountKeys', response) @@ -730,7 +689,7 @@ def get_keys( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -739,8 +698,8 @@ def get_keys( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -748,7 +707,6 @@ def get_keys( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('BatchAccountKeys', response) diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/certificate_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_certificate_operations.py similarity index 84% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/certificate_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_certificate_operations.py index 8db5cd991d95..eb726f288a3f 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/certificate_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_certificate_operations.py @@ -12,8 +12,8 @@ import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError -from msrest.exceptions import DeserializationError -from msrestazure.azure_operation import AzureOperationPoller +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling from .. import models @@ -21,11 +21,13 @@ class CertificateOperations(object): """CertificateOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". 
""" models = models @@ -35,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -69,8 +71,7 @@ def list_by_batch_account( ~azure.mgmt.batch.models.CertificatePaged[~azure.mgmt.batch.models.Certificate] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_batch_account.metadata['url'] @@ -97,7 +98,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -106,9 +107,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -118,12 +123,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.CertificatePaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates'} @@ -147,6 +150,7 @@ def _create_initial( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -163,9 +167,8 @@ def _create_initial( body_content = self._serialize.body(parameters, 'CertificateCreateOrUpdateParameters') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -183,16 +186,13 @@ def _create_initial( if raw: client_raw_response = ClientRawResponse(deserialized, response) - try: - client_raw_response.add_headers(header_dict) - except DeserializationError: - pass # Deserialization of Headers here can fail + client_raw_response.add_headers(header_dict) return client_raw_response return deserialized def create( - self, 
resource_group_name, account_name, certificate_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config): + self, resource_group_name, account_name, certificate_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, polling=True, **operation_config): """Creates a new certificate inside the specified account. :param resource_group_name: The name of the resource group that @@ -217,13 +217,16 @@ def create( will be ignored. :type if_none_match: str :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns Certificate - or ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns Certificate or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Certificate] - or ~msrest.pipeline.ClientRawResponse + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.batch.models.Certificate]] :raises: :class:`CloudError` """ raw_result = self._create_initial( @@ -237,30 +240,8 @@ def create( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - header_dict = { 'ETag': 'str', } @@ -273,12 +254,13 @@ def get_long_running_output(response): return deserialized - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}'} def update( @@ -327,6 +309,7 @@ def update( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -341,18 +324,16 @@ def update( body_content = self._serialize.body(parameters, 'CertificateCreateOrUpdateParameters') # Construct and send request - request = 
self._client.patch(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.patch(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp - deserialized = None header_dict = {} - + deserialized = None if response.status_code == 200: deserialized = self._deserialize('Certificate', response) header_dict = { @@ -386,7 +367,6 @@ def _delete_initial( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -395,8 +375,8 @@ def _delete_initial( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.delete(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200, 202, 204]: exp = CloudError(response) @@ -413,7 +393,7 @@ def _delete_initial( return client_raw_response def delete( - self, resource_group_name, account_name, certificate_name, custom_headers=None, raw=False, **operation_config): + self, resource_group_name, account_name, certificate_name, custom_headers=None, raw=False, polling=True, **operation_config): """Deletes the specified certificate. :param resource_group_name: The name of the resource group that @@ -426,12 +406,14 @@ def delete( match the certificate data in the request. For example SHA1-a3d1c5. 
:type certificate_name: str :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :return: An instance of AzureOperationPoller that returns None or - ClientRawResponse if raw=true + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrest.pipeline.ClientRawResponse + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError` """ raw_result = self._delete_initial( @@ -442,30 +424,8 @@ def delete( raw=True, **operation_config ) - if raw: - return raw_result - - # Construct and send request - def long_running_send(): - return raw_result.response - - def get_long_running_status(status_link, headers=None): - - request = self._client.get(status_link) - if headers: - request.headers.update(headers) - header_parameters = {} - header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id'] - return self._client.send( - request, header_parameters, stream=False, **operation_config) def get_long_running_output(response): - - if response.status_code not in [200, 202, 204]: - exp = CloudError(response) - exp.request_id = response.headers.get('x-ms-request-id') - raise exp - if raw: client_raw_response = ClientRawResponse(None, response) client_raw_response.add_headers({ @@ -474,12 +434,13 @@ def get_long_running_output(response): }) return client_raw_response - long_running_operation_timeout = operation_config.get( + lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) - return AzureOperationPoller( - long_running_send, get_long_running_output, - get_long_running_status, long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/certificates/{certificateName}'} def get( @@ -521,7 +482,7 @@ def get( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -530,17 +491,16 @@ def get( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp - deserialized = None header_dict = {} - + 
deserialized = None if response.status_code == 200: deserialized = self._deserialize('Certificate', response) header_dict = { @@ -603,7 +563,7 @@ def cancel_deletion( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -612,17 +572,16 @@ def cancel_deletion( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp - deserialized = None header_dict = {} - + deserialized = None if response.status_code == 200: deserialized = self._deserialize('Certificate', response) header_dict = { diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/location_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_location_operations.py similarity index 91% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/location_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_location_operations.py index 84b9be79e2c0..686dc2b3097b 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/location_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_location_operations.py @@ -19,11 +19,13 @@ class LocationOperations(object): """LocationOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". 
""" models = models @@ -33,7 +35,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -69,7 +71,7 @@ def get_quotas( # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -78,8 +80,8 @@ def get_quotas( header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send(request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -87,7 +89,6 @@ def get_quotas( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('BatchLocationQuota', response) @@ -133,6 +134,7 @@ def check_name_availability( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -145,9 +147,8 @@ def check_name_availability( body_content = self._serialize.body(parameters, 'CheckNameAvailabilityParameters') # Construct and send request - request = self._client.post(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.post(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -155,7 +156,6 @@ def check_name_availability( raise exp deserialized = None - if response.status_code == 200: deserialized = self._deserialize('CheckNameAvailabilityResult', response) diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_operations.py similarity index 82% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_operations.py index 4774273e7176..799529b363cb 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_operations.py @@ -19,11 +19,13 @@ class Operations(object): """Operations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". 
""" models = models @@ -33,7 +35,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -51,8 +53,7 @@ def list( ~azure.mgmt.batch.models.OperationPaged[~azure.mgmt.batch.models.Operation] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list.metadata['url'] @@ -67,7 +68,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -76,9 +77,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -88,12 +93,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.OperationPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.OperationPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.OperationPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list.metadata = {'url': '/providers/Microsoft.Batch/operations'} diff --git a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_pool_operations.py similarity index 85% rename from sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py rename to sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_pool_operations.py index 02cab779f816..95715cda042a 100644 --- a/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/pool_operations.py +++ b/sdk/batch/azure-mgmt-batch/azure/mgmt/batch/operations/_pool_operations.py @@ -12,8 +12,8 @@ import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError -from msrest.exceptions import DeserializationError -from msrestazure.azure_operation import AzureOperationPoller +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling from .. import models @@ -21,11 +21,13 @@ class PoolOperations(object): """PoolOperations operations. + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
- :ivar api_version: The API version to be used with the HTTP request. Constant value: "2018-12-01". + :ivar api_version: The API version to be used with the HTTP request. Constant value: "2019-04-01". """ models = models @@ -35,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-12-01" + self.api_version = "2019-04-01" self.config = config @@ -79,8 +81,7 @@ def list_by_batch_account( ~azure.mgmt.batch.models.PoolPaged[~azure.mgmt.batch.models.Pool] :raises: :class:`CloudError` """ - def internal_paging(next_link=None, raw=False): - + def prepare_request(next_link=None): if not next_link: # Construct URL url = self.list_by_batch_account.metadata['url'] @@ -107,7 +108,7 @@ def internal_paging(next_link=None, raw=False): # Construct headers header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' + header_parameters['Accept'] = 'application/json' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: @@ -116,9 +117,13 @@ def internal_paging(next_link=None, raw=False): header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request - request = self._client.get(url, query_parameters) - response = self._client.send( - request, header_parameters, stream=False, **operation_config) + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -128,12 +133,10 @@ def internal_paging(next_link=None, raw=False): return response # Deserialize response - deserialized = models.PoolPaged(internal_paging, self._deserialize.dependencies) - + header_dict = None if raw: header_dict = {} - client_raw_response = models.PoolPaged(internal_paging, self._deserialize.dependencies, header_dict) - return client_raw_response + deserialized = models.PoolPaged(internal_paging, self._deserialize.dependencies, header_dict) return deserialized list_by_batch_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools'} @@ -157,6 +160,7 @@ def _create_initial( # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) @@ -173,9 +177,8 @@ def _create_initial( body_content = self._serialize.body(parameters, 'Pool') # Construct and send request - request = self._client.put(url, query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) @@ -193,16 +196,13 @@ def _create_initial( if raw: client_raw_response = ClientRawResponse(deserialized, response) - try: - client_raw_response.add_headers(header_dict) - except DeserializationError: - pass # Deserialization of 
+            client_raw_response.add_headers(header_dict)
             return client_raw_response
 
         return deserialized
 
     def create(
-            self, resource_group_name, account_name, pool_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, **operation_config):
+            self, resource_group_name, account_name, pool_name, parameters, if_match=None, if_none_match=None, custom_headers=None, raw=False, polling=True, **operation_config):
         """Creates a new pool inside the specified account.
 
         :param resource_group_name: The name of the resource group that
@@ -225,13 +225,16 @@ def create(
          ignored.
         :type if_none_match: str
         :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :return: An instance of AzureOperationPoller that returns Pool or
-         ClientRawResponse if raw=true
+        :param bool raw: The poller return type is ClientRawResponse, the
+         direct response alongside the deserialized response
+        :param polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :return: An instance of LROPoller that returns Pool or
+         ClientRawResponse if raw==True
         :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.batch.models.Pool]
-         or ~msrest.pipeline.ClientRawResponse
+         or
+         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.batch.models.Pool]]
         :raises: :class:`CloudError`
         """
         raw_result = self._create_initial(
@@ -245,30 +248,8 @@ def create(
             raw=True,
             **operation_config
         )
-        if raw:
-            return raw_result
-
-        # Construct and send request
-        def long_running_send():
-            return raw_result.response
-
-        def get_long_running_status(status_link, headers=None):
-
-            request = self._client.get(status_link)
-            if headers:
-                request.headers.update(headers)
-            header_parameters = {}
-            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
-            return self._client.send(
-                request, header_parameters, stream=False, **operation_config)
 
         def get_long_running_output(response):
-
-            if response.status_code not in [200]:
-                exp = CloudError(response)
-                exp.request_id = response.headers.get('x-ms-request-id')
-                raise exp
-
             header_dict = {
                 'ETag': 'str',
             }
@@ -281,12 +262,13 @@ def get_long_running_output(response):
 
             return deserialized
 
-        long_running_operation_timeout = operation_config.get(
+        lro_delay = operation_config.get(
             'long_running_operation_timeout',
             self.config.long_running_operation_timeout)
-        return AzureOperationPoller(
-            long_running_send, get_long_running_output,
-            get_long_running_status, long_running_operation_timeout)
+        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
     create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
 
     def update(
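With the change above, create() no longer builds its own long_running_send/get_long_running_status helpers; it hands the initial response to an LROPoller driven by ARMPolling (or NoPolling, or a caller-supplied strategy). From the caller's side the call still blocks on .result(). A hedged sketch; the client object and all resource names and pool settings below are illustrative placeholders, not taken from this diff:

# Sketch only: creating a pool through the regenerated LROPoller-based API.
# "client" is an existing BatchManagementClient; names and sizes are made up.
from azure.mgmt.batch import models

poller = client.pool.create(
    resource_group_name='example-rg',
    account_name='examplebatchaccount',
    pool_name='examplepool',
    parameters=models.Pool(
        vm_size='STANDARD_D2_V2',
        deployment_configuration=models.DeploymentConfiguration(
            cloud_service_configuration=models.CloudServiceConfiguration(os_family='5'),
        ),
    ),
)
pool = poller.result()  # waits for the LRO to finish (ARMPolling by default)
print(pool.provisioning_state)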
@@ -335,6 +317,7 @@ def update(
 
         # Construct headers
         header_parameters = {}
+        header_parameters['Accept'] = 'application/json'
         header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
@@ -349,18 +332,16 @@ def update(
         body_content = self._serialize.body(parameters, 'Pool')
 
         # Construct and send request
-        request = self._client.patch(url, query_parameters)
-        response = self._client.send(
-            request, header_parameters, body_content, stream=False, **operation_config)
+        request = self._client.patch(url, query_parameters, header_parameters, body_content)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
             exp.request_id = response.headers.get('x-ms-request-id')
             raise exp
 
-        deserialized = None
         header_dict = {}
-
+        deserialized = None
         if response.status_code == 200:
             deserialized = self._deserialize('Pool', response)
             header_dict = {
@@ -394,7 +375,6 @@ def _delete_initial(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -403,8 +383,8 @@ def _delete_initial(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.delete(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.delete(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200, 202, 204]:
             exp = CloudError(response)
@@ -421,7 +401,7 @@ def _delete_initial(
             return client_raw_response
 
     def delete(
-            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, **operation_config):
+            self, resource_group_name, account_name, pool_name, custom_headers=None, raw=False, polling=True, **operation_config):
         """Deletes the specified pool.
 
         :param resource_group_name: The name of the resource group that
@@ -433,12 +413,14 @@ def delete(
          account.
        :type pool_name: str
         :param dict custom_headers: headers that will be added to the request
-        :param bool raw: returns the direct response alongside the
-         deserialized response
-        :return: An instance of AzureOperationPoller that returns None or
-         ClientRawResponse if raw=true
+        :param bool raw: The poller return type is ClientRawResponse, the
+         direct response alongside the deserialized response
+        :param polling: True for ARMPolling, False for no polling, or a
+         polling object for personal polling strategy
+        :return: An instance of LROPoller that returns None or
+         ClientRawResponse if raw==True
         :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
-         ~msrest.pipeline.ClientRawResponse
+         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
         :raises: :class:`CloudError`
         """
         raw_result = self._delete_initial(
@@ -449,30 +431,8 @@ def delete(
             raw=True,
             **operation_config
         )
-        if raw:
-            return raw_result
-
-        # Construct and send request
-        def long_running_send():
-            return raw_result.response
-
-        def get_long_running_status(status_link, headers=None):
-
-            request = self._client.get(status_link)
-            if headers:
-                request.headers.update(headers)
-            header_parameters = {}
-            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
-            return self._client.send(
-                request, header_parameters, stream=False, **operation_config)
 
         def get_long_running_output(response):
-
-            if response.status_code not in [200, 202, 204]:
-                exp = CloudError(response)
-                exp.request_id = response.headers.get('x-ms-request-id')
-                raise exp
-
             if raw:
                 client_raw_response = ClientRawResponse(None, response)
                 client_raw_response.add_headers({
@@ -481,12 +441,13 @@ def get_long_running_output(response):
                 })
                 return client_raw_response
 
-        long_running_operation_timeout = operation_config.get(
+        lro_delay = operation_config.get(
             'long_running_operation_timeout',
             self.config.long_running_operation_timeout)
-        return AzureOperationPoller(
-            long_running_send, get_long_running_output,
-            get_long_running_status, long_running_operation_timeout)
+        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
+        elif polling is False: polling_method = NoPolling()
+        else: polling_method = polling
+        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
     delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/pools/{poolName}'}
 
     def get(
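delete() gets the same treatment, and the new polling keyword applies to both LRO methods: True selects ARMPolling, False selects NoPolling, and any other value is used as the polling strategy object itself. A sketch under the same assumptions as the previous example (client and names are placeholders):

# Sketch only: the "polling" keyword on the regenerated LRO methods.
from msrestazure.polling.arm_polling import ARMPolling

# Default (polling=True): ARMPolling drives the operation to completion.
client.pool.delete('example-rg', 'examplebatchaccount', 'examplepool').wait()

# polling=False: NoPolling; only the initial DELETE is sent and result()
# returns after that first response.
client.pool.delete('example-rg', 'examplebatchaccount', 'examplepool', polling=False).result()

# Any other value is used directly, e.g. ARMPolling with a longer interval.
client.pool.delete('example-rg', 'examplebatchaccount', 'examplepool',
                   polling=ARMPolling(timeout=60))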
@@ -527,7 +488,7 @@ def get(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -536,17 +497,16 @@ def get(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.get(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.get(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
             exp.request_id = response.headers.get('x-ms-request-id')
             raise exp
 
-        deserialized = None
         header_dict = {}
-
+        deserialized = None
         if response.status_code == 200:
             deserialized = self._deserialize('Pool', response)
             header_dict = {
@@ -599,7 +559,7 @@ def disable_auto_scale(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -608,17 +568,16 @@ def disable_auto_scale(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
             exp.request_id = response.headers.get('x-ms-request-id')
             raise exp
 
-        deserialized = None
         header_dict = {}
-
+        deserialized = None
         if response.status_code == 200:
             deserialized = self._deserialize('Pool', response)
             header_dict = {
@@ -680,7 +639,7 @@ def stop_resize(
 
         # Construct headers
         header_parameters = {}
-        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        header_parameters['Accept'] = 'application/json'
         if self.config.generate_client_request_id:
             header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
         if custom_headers:
@@ -689,17 +648,16 @@ def stop_resize(
             header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
 
         # Construct and send request
-        request = self._client.post(url, query_parameters)
-        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+        request = self._client.post(url, query_parameters, header_parameters)
+        response = self._client.send(request, stream=False, **operation_config)
 
         if response.status_code not in [200]:
             exp = CloudError(response)
             exp.request_id = response.headers.get('x-ms-request-id')
             raise exp
 
-        deserialized = None
         header_dict = {}
-
+        deserialized = None
         if response.status_code == 200:
             deserialized = self._deserialize('Pool', response)
             header_dict = {