From 7ad122318dfc1a64dbe34196babc83bca368f8b6 Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Sun, 9 Dec 2018 20:12:18 -0800 Subject: [PATCH 1/3] [AutoPR batch/data-plane] typo: batch/data-plane/Microsoft.Batch (#4030) * Generated from 842454426730490ff031714d56f9b76eaf80f8ba typo: batch/data-plane/Microsoft.Batch - upto -> up to - pre-empted -> preempted - visibile -> visible - comptue -> compute * Generated from 842454426730490ff031714d56f9b76eaf80f8ba typo: batch/data-plane/Microsoft.Batch - upto -> up to - pre-empted -> preempted - visibile -> visible - comptue -> compute --- .../azure/batch/batch_service_client.py | 4 - azure-batch/azure/batch/custom/__init__.py | 0 .../azure/batch/custom/custom_errors.py | 19 - azure-batch/azure/batch/custom/patch.py | 301 ---- .../models/batch_service_client_enums.py | 6 +- .../azure/batch/models/compute_node.py | 2 +- .../azure/batch/models/compute_node_py3.py | 2 +- .../azure/batch/models/exit_conditions.py | 4 +- .../azure/batch/models/exit_conditions_py3.py | 4 +- .../azure/batch/models/exit_options.py | 2 +- .../azure/batch/models/exit_options_py3.py | 2 +- .../batch/models/job_preparation_task.py | 2 +- .../batch/models/job_preparation_task_py3.py | 2 +- .../models/virtual_machine_configuration.py | 2 +- .../virtual_machine_configuration_py3.py | 2 +- .../operations/compute_node_operations.py | 10 +- .../azure/batch/operations/job_operations.py | 2 +- .../operations/job_schedule_operations.py | 2 +- .../azure/batch/operations/pool_operations.py | 2 +- azure-batch/azure/batch/version.py | 2 +- azure-batch/build.json | 1499 +++++++++++++++++ 21 files changed, 1523 insertions(+), 348 deletions(-) delete mode 100644 azure-batch/azure/batch/custom/__init__.py delete mode 100644 azure-batch/azure/batch/custom/custom_errors.py delete mode 100644 azure-batch/azure/batch/custom/patch.py create mode 100644 azure-batch/build.json diff --git a/azure-batch/azure/batch/batch_service_client.py 
b/azure-batch/azure/batch/batch_service_client.py index bf0d624c9c47..80c19929f9cb 100644 --- a/azure-batch/azure/batch/batch_service_client.py +++ b/azure-batch/azure/batch/batch_service_client.py @@ -23,7 +23,6 @@ from .operations.task_operations import TaskOperations from .operations.compute_node_operations import ComputeNodeOperations from . import models -from .custom.patch import patch_client class BatchServiceClientConfiguration(AzureConfiguration): @@ -113,6 +112,3 @@ def __init__( self._client, self.config, self._serialize, self._deserialize) self.compute_node = ComputeNodeOperations( self._client, self.config, self._serialize, self._deserialize) - - -patch_client() diff --git a/azure-batch/azure/batch/custom/__init__.py b/azure-batch/azure/batch/custom/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/azure-batch/azure/batch/custom/custom_errors.py b/azure-batch/azure/batch/custom/custom_errors.py deleted file mode 100644 index 17da723515f3..000000000000 --- a/azure-batch/azure/batch/custom/custom_errors.py +++ /dev/null @@ -1,19 +0,0 @@ -# -------------------------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for license information. -# -------------------------------------------------------------------------------------------- - - -class CreateTasksErrorException(Exception): - """ Aggregate Exception containing details for any failures from a task add operation. - - :param str message: Error message describing exit reason - :param [~TaskAddParameter] pending_task_list: List of tasks remaining to be submitted. 
- :param [~TaskAddResult] failure_tasks: List of tasks which failed to add - :param [~Exception] errors: List of unknown errors forcing early termination - """ - def __init__(self, message, pending_task_list=None, failure_tasks=None, errors=None): - self.message = message - self.pending_tasks = list(pending_task_list) - self.failure_tasks = list(failure_tasks) - self.errors = list(errors) diff --git a/azure-batch/azure/batch/custom/patch.py b/azure-batch/azure/batch/custom/patch.py deleted file mode 100644 index 7fc04b15a898..000000000000 --- a/azure-batch/azure/batch/custom/patch.py +++ /dev/null @@ -1,301 +0,0 @@ -import collections -import importlib -import logging -import threading -import types -import sys - -from ..models import BatchErrorException, TaskAddCollectionResult, TaskAddStatus -from ..custom.custom_errors import CreateTasksErrorException -from ..operations.task_operations import TaskOperations - -MAX_TASKS_PER_REQUEST = 100 -_LOGGER = logging.getLogger(__name__) - -class _TaskWorkflowManager(object): - """Worker class for one add_collection request - - :param ~TaskOperations task_operations: Parent object which instantiated this - :param str job_id: The ID of the job to which the task collection is to be - added. - :param tasks_to_add: The collection of tasks to add. 
- :type tasks_to_add: list of :class:`TaskAddParameter - ` - :param task_add_collection_options: Additional parameters for the - operation - :type task_add_collection_options: :class:`TaskAddCollectionOptions - ` - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - """ - - def __init__( - self, - client, - original_add_collection, - job_id, - tasks_to_add, - task_add_collection_options=None, - custom_headers=None, - raw=False, - **kwargs): - # Append operations thread safe - Only read once all threads have completed - # List of tasks which failed to add due to a returned client error - self._failure_tasks = collections.deque() - # List of unknown exceptions which occurred during requests. - self._errors = collections.deque() - - # synchronized through lock variables - self.error = None # Only written once all threads have completed - self._max_tasks_per_request = MAX_TASKS_PER_REQUEST - self._tasks_to_add = collections.deque(tasks_to_add) - - self._error_lock = threading.Lock() - self._max_tasks_lock = threading.Lock() - self._pending_queue_lock = threading.Lock() - - # Variables to be used for task add_collection requests - self._client = client - self._original_add_collection = original_add_collection - self._job_id = job_id - self._task_add_collection_options = task_add_collection_options - self._custom_headers = custom_headers - self._raw = raw - self._kwargs = dict(**kwargs) - - def _bulk_add_tasks(self, results_queue, chunk_tasks_to_add): - """Adds a chunk of tasks to the job - - Retry chunk if body exceeds the maximum request size and retry tasks - if failed due to server errors. 
- - :param results_queue: Queue to place the return value of the request - :type results_queue: collections.deque - :param chunk_tasks_to_add: Chunk of at most 100 tasks with retry details - :type chunk_tasks_to_add: list[~TrackedCloudTask] - """ - - try: - add_collection_response = self._original_add_collection( - self._client, - self._job_id, - chunk_tasks_to_add, - self._task_add_collection_options, - self._custom_headers, - self._raw) - except BatchErrorException as e: - # In case of a chunk exceeding the MaxMessageSize split chunk in half - # and resubmit smaller chunk requests - # TODO: Replace string with constant variable once available in SDK - if e.error.code == "RequestBodyTooLarge": # pylint: disable=no-member - # In this case the task is misbehaved and will not be able to be added due to: - # 1) The task exceeding the max message size - # 2) A single cell of the task exceeds the per-cell limit, or - # 3) Sum of all cells exceeds max row limit - if len(chunk_tasks_to_add) == 1: - failed_task = chunk_tasks_to_add.pop() - self._errors.appendleft(e) - _LOGGER.error("Failed to add task with ID %s due to the body" - " exceeding the maximum request size", failed_task.id) - else: - # Assumption: Tasks are relatively close in size therefore if one batch exceeds size limit - # we should decrease the initial task collection size to avoid repeating the error - # Midpoint is lower bounded by 1 due to above base case - midpoint = int(len(chunk_tasks_to_add) / 2) - # Restrict one thread at a time to do this compare and set, - # therefore forcing max_tasks_per_request to be strictly decreasing - with self._max_tasks_lock: - if midpoint < self._max_tasks_per_request: - self._max_tasks_per_request = midpoint - _LOGGER.info("Amount of tasks per request reduced from %s to %s due to the" - " request body being too large", str(self._max_tasks_per_request), - str(midpoint)) - - # Not the most efficient solution for all cases, but the goal of this is to handle this - # 
exception and have it work in all cases where tasks are well behaved - # Behavior retries as a smaller chunk and - # appends extra tasks to queue to be picked up by another thread . - self._tasks_to_add.extendleft(chunk_tasks_to_add[midpoint:]) - self._bulk_add_tasks(results_queue, chunk_tasks_to_add[:midpoint]) - # Retry server side errors - elif 500 <= e.response.status_code <= 599: - self._tasks_to_add.extendleft(chunk_tasks_to_add) - else: - # Re-add to pending queue as unknown status / don't have result - self._tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self._errors.appendleft(e) - except Exception as e: # pylint: disable=broad-except - # Re-add to pending queue as unknown status / don't have result - self._tasks_to_add.extendleft(chunk_tasks_to_add) - # Unknown State - don't know if tasks failed to add or were successful - self._errors.appendleft(e) - else: - try: - add_collection_response = add_collection_response.output - except AttributeError: - pass - - for task_result in add_collection_response.value: # pylint: disable=no-member - if task_result.status == TaskAddStatus.server_error: - # Server error will be retried - with self._pending_queue_lock: - for task in chunk_tasks_to_add: - if task.id == task_result.task_id: - self._tasks_to_add.appendleft(task) - elif (task_result.status == TaskAddStatus.client_error - and not task_result.error.code == "TaskExists"): - # Client error will be recorded unless Task already exists - self._failure_tasks.appendleft(task_result) - else: - results_queue.appendleft(task_result) - - def task_collection_thread_handler(self, results_queue): - """Main method for worker to run - - Pops a chunk of tasks off the collection of pending tasks to be added and submits them to be added. 
- - :param collections.deque results_queue: Queue for worker to output results to - """ - # Add tasks until either we run out or we run into an unexpected error - while self._tasks_to_add and not self._errors: - max_tasks = self._max_tasks_per_request # local copy - chunk_tasks_to_add = [] - with self._pending_queue_lock: - while len(chunk_tasks_to_add) < max_tasks and self._tasks_to_add: - chunk_tasks_to_add.append(self._tasks_to_add.pop()) - - if chunk_tasks_to_add: - self._bulk_add_tasks(results_queue, chunk_tasks_to_add) - - # Only define error if all threads have finished and there were failures - with self._error_lock: - if threading.active_count() == 1 and (self._failure_tasks or self._errors): - self.error = CreateTasksErrorException( - "One or more tasks failed to be added", - self._failure_tasks, - self._tasks_to_add, - self._errors) - - -def _handle_output(results_queue): - """Scan output for exceptions - - If there is an output from an add task collection call add it to the results. - - :param results_queue: Queue containing results of attempted add_collection's - :type results_queue: collections.deque - :return: list of TaskAddResults - :rtype: list[~TaskAddResult] - """ - results = [] - while results_queue: - queue_item = results_queue.pop() - results.append(queue_item) - return results - - -def build_new_add_collection(original_add_collection): - def bulk_add_collection( - self, - job_id, - value, - task_add_collection_options=None, - custom_headers=None, - raw=False, - threads=0, - **operation_config): - """Adds a collection of tasks to the specified job. - - Note that each task must have a unique ID. The Batch service may not - return the results for each task in the same order the tasks were - submitted in this request. If the server times out or the connection is - closed during the request, the request may have been partially or fully - processed, or not at all. In such cases, the user should re-issue the - request. 
Note that it is up to the user to correctly handle failures - when re-issuing a request. For example, you should use the same task - IDs during a retry so that if the prior operation succeeded, the retry - will not create extra tasks unexpectedly. If the response contains any - tasks which failed to add, a client can retry the request. In a retry, - it is most efficient to resubmit only tasks that failed to add, and to - omit tasks that were successfully added on the first attempt. - - :param job_id: The ID of the job to which the task collection is to be - added. - :type job_id: str - :param value: The collection of tasks to add. The total serialized - size of this collection must be less than 4MB. If it is greater than - 4MB (for example if each task has 100's of resource files or - environment variables), the request will fail with code - 'RequestBodyTooLarge' and should be retried again with fewer tasks. - :type value: list of :class:`TaskAddParameter - ` - :param task_add_collection_options: Additional parameters for the - operation - :type task_add_collection_options: :class:`TaskAddCollectionOptions - ` - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param int threads: number of threads to use in parallel when adding tasks. If specified - and greater than 0, will start additional threads to submit requests and wait for them to finish. 
- Otherwise will submit add_collection requests sequentially on main thread - :return: :class:`TaskAddCollectionResult - ` or - :class:`ClientRawResponse` if - raw=true - :rtype: :class:`TaskAddCollectionResult - ` or - :class:`ClientRawResponse` - :raises: - :class:`CreateTasksErrorException` - """ - - results_queue = collections.deque() # deque operations(append/pop) are thread-safe - task_workflow_manager = _TaskWorkflowManager( - self, - original_add_collection, - job_id, - value, - task_add_collection_options, - custom_headers, - raw, - **operation_config) - - # multi-threaded behavior - if threads: - if threads < 0: - raise ValueError("Threads must be positive or 0") - - active_threads = [] - for i in range(threads): - active_threads.append(threading.Thread( - target=task_workflow_manager.task_collection_thread_handler, - args=(results_queue,))) - active_threads[-1].start() - for thread in active_threads: - thread.join() - # single-threaded behavior - else: - task_workflow_manager.task_collection_thread_handler(results_queue) - - if task_workflow_manager.error: - raise task_workflow_manager.error # pylint: disable=raising-bad-type - else: - submitted_tasks = _handle_output(results_queue) - return TaskAddCollectionResult(value=submitted_tasks) - bulk_add_collection.metadata = {'url': '/jobs/{jobId}/addtaskcollection'} - return bulk_add_collection - - -def patch_client(): - try: - models = sys.modules['azure.batch.models'] - except KeyError: - models = importlib.import_module('azure.batch.models') - setattr(models, 'CreateTasksErrorException', CreateTasksErrorException) - sys.modules['azure.batch.models'] = models - - operations_modules = importlib.import_module('azure.batch.operations') - operations_modules.TaskOperations.add_collection = build_new_add_collection(operations_modules.TaskOperations.add_collection) diff --git a/azure-batch/azure/batch/models/batch_service_client_enums.py b/azure-batch/azure/batch/models/batch_service_client_enums.py index 
c2775616ecb6..a2c68e719280 100644 --- a/azure-batch/azure/batch/models/batch_service_client_enums.py +++ b/azure-batch/azure/batch/models/batch_service_client_enums.py @@ -83,8 +83,8 @@ class CertificateStoreLocation(str, Enum): class CertificateVisibility(str, Enum): start_task = "starttask" #: The certificate should be visible to the user account under which the start task is run. - task = "task" #: The certificate should be visibile to the user accounts under which job tasks are run. - remote_user = "remoteuser" #: The certificate should be visibile to the user accounts under which users remotely access the node. + task = "task" #: The certificate should be visible to the user accounts under which job tasks are run. + remote_user = "remoteuser" #: The certificate should be visible to the user accounts under which users remotely access the node. class CachingType(str, Enum): @@ -230,7 +230,7 @@ class ComputeNodeState(str, Enum): unknown = "unknown" #: The Batch service has lost contact with the node, and does not know its true state. leaving_pool = "leavingpool" #: The node is leaving the pool, either because the user explicitly removed it or because the pool is resizing or autoscaling down. offline = "offline" #: The node is not currently running a task, and scheduling of new tasks to the node is disabled. - preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was pre-empted will be rescheduled when another node becomes available. + preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was preempted will be rescheduled when another node becomes available. 
class SchedulingState(str, Enum): diff --git a/azure-batch/azure/batch/models/compute_node.py b/azure-batch/azure/batch/models/compute_node.py index 444598f9e5cd..691adc5bd548 100644 --- a/azure-batch/azure/batch/models/compute_node.py +++ b/azure-batch/azure/batch/models/compute_node.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' diff --git a/azure-batch/azure/batch/models/compute_node_py3.py b/azure-batch/azure/batch/models/compute_node_py3.py index 1b5d7405a981..7b9c41a9c07e 100644 --- a/azure-batch/azure/batch/models/compute_node_py3.py +++ b/azure-batch/azure/batch/models/compute_node_py3.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. 
Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' diff --git a/azure-batch/azure/batch/models/exit_conditions.py b/azure-batch/azure/batch/models/exit_conditions.py index c368f4a16f25..660858630d69 100644 --- a/azure-batch/azure/batch/models/exit_conditions.py +++ b/azure-batch/azure/batch/models/exit_conditions.py @@ -35,8 +35,8 @@ class ExitConditions(Model): the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want - non-default behaviour on exit code 0, you must list it explicitly using - the exitCodes or exitCodeRanges collection. + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. :type default: ~azure.batch.models.ExitOptions """ diff --git a/azure-batch/azure/batch/models/exit_conditions_py3.py b/azure-batch/azure/batch/models/exit_conditions_py3.py index f0630c7fecc2..65cd5aaa89f7 100644 --- a/azure-batch/azure/batch/models/exit_conditions_py3.py +++ b/azure-batch/azure/batch/models/exit_conditions_py3.py @@ -35,8 +35,8 @@ class ExitConditions(Model): the exitCodes or exitCodeRanges collection, with a pre-processing error if the preProcessingError property is not present, or with a file upload error if the fileUploadError property is not present. If you want - non-default behaviour on exit code 0, you must list it explicitly using - the exitCodes or exitCodeRanges collection. + non-default behavior on exit code 0, you must list it explicitly using the + exitCodes or exitCodeRanges collection. 
:type default: ~azure.batch.models.ExitOptions """ diff --git a/azure-batch/azure/batch/models/exit_options.py b/azure-batch/azure/batch/models/exit_options.py index f7f5e7b10578..3e2dee00f962 100644 --- a/azure-batch/azure/batch/models/exit_options.py +++ b/azure-batch/azure/batch/models/exit_options.py @@ -19,7 +19,7 @@ class ExitOptions(Model): the task completes with the given exit condition and the job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the - job's onTaskFailed property is noaction, then specifying this property + job's onTaskFailed property is noAction, then specifying this property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'none', 'disable', diff --git a/azure-batch/azure/batch/models/exit_options_py3.py b/azure-batch/azure/batch/models/exit_options_py3.py index 0867bbea15a8..ac81ee1b74fa 100644 --- a/azure-batch/azure/batch/models/exit_options_py3.py +++ b/azure-batch/azure/batch/models/exit_options_py3.py @@ -19,7 +19,7 @@ class ExitOptions(Model): the task completes with the given exit condition and the job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the - job's onTaskFailed property is noaction, then specifying this property + job's onTaskFailed property is noAction, then specifying this property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
Possible values include: 'none', 'disable', diff --git a/azure-batch/azure/batch/models/job_preparation_task.py b/azure-batch/azure/batch/models/job_preparation_task.py index 1987aed378bc..acf8da03e93b 100644 --- a/azure-batch/azure/batch/models/job_preparation_task.py +++ b/azure-batch/azure/batch/models/job_preparation_task.py @@ -104,7 +104,7 @@ class JobPreparationTask(Model): :type wait_for_success: bool :param user_identity: The user identity under which the Job Preparation task runs. If omitted, the task runs as a non-administrative user unique - to the task on Windows nodes, or a a non-administrative user unique to the + to the task on Windows nodes, or a non-administrative user unique to the pool on Linux nodes. :type user_identity: ~azure.batch.models.UserIdentity :param rerun_on_node_reboot_after_success: Whether the Batch service diff --git a/azure-batch/azure/batch/models/job_preparation_task_py3.py b/azure-batch/azure/batch/models/job_preparation_task_py3.py index d1bb21ed384c..93e99d3793dc 100644 --- a/azure-batch/azure/batch/models/job_preparation_task_py3.py +++ b/azure-batch/azure/batch/models/job_preparation_task_py3.py @@ -104,7 +104,7 @@ class JobPreparationTask(Model): :type wait_for_success: bool :param user_identity: The user identity under which the Job Preparation task runs. If omitted, the task runs as a non-administrative user unique - to the task on Windows nodes, or a a non-administrative user unique to the + to the task on Windows nodes, or a non-administrative user unique to the pool on Linux nodes. 
:type user_identity: ~azure.batch.models.UserIdentity :param rerun_on_node_reboot_after_success: Whether the Batch service diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration.py b/azure-batch/azure/batch/models/virtual_machine_configuration.py index 52b8d7f6c8c5..ebf7f7d6b817 100644 --- a/azure-batch/azure/batch/models/virtual_machine_configuration.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration.py @@ -39,7 +39,7 @@ class VirtualMachineConfiguration(Model): or osDisk property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - comptue nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py index 686568a7e18b..c38af5c78a29 100644 --- a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py @@ -39,7 +39,7 @@ class VirtualMachineConfiguration(Model): or osDisk property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - comptue nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). 
Existing disks cannot be attached, each attached disk is empty. diff --git a/azure-batch/azure/batch/operations/compute_node_operations.py b/azure-batch/azure/batch/operations/compute_node_operations.py index ea988094f6a4..e88836e7511e 100644 --- a/azure-batch/azure/batch/operations/compute_node_operations.py +++ b/azure-batch/azure/batch/operations/compute_node_operations.py @@ -221,11 +221,11 @@ def update_user( """Updates the password and expiration time of a user account on the specified compute node. - This operation replaces of all the updateable properties of the - account. For example, if the expiryTime element is not specified, the - current value is replaced with the default value, not left unmodified. - You can update a user account on a node only when it is in the idle or - running state. + This operation replaces of all the updatable properties of the account. + For example, if the expiryTime element is not specified, the current + value is replaced with the default value, not left unmodified. You can + update a user account on a node only when it is in the idle or running + state. :param pool_id: The ID of the pool that contains the compute node. :type pool_id: str diff --git a/azure-batch/azure/batch/operations/job_operations.py b/azure-batch/azure/batch/operations/job_operations.py index 333d346add1a..7ef11c40edb4 100644 --- a/azure-batch/azure/batch/operations/job_operations.py +++ b/azure-batch/azure/batch/operations/job_operations.py @@ -462,7 +462,7 @@ def update( self, job_id, job_update_parameter, job_update_options=None, custom_headers=None, raw=False, **operation_config): """Updates the properties of the specified job. - This fully replaces all the updateable properties of the job. For + This fully replaces all the updatable properties of the job. For example, if the job has constraints associated with it and if constraints is not specified with this request, then the Batch service will remove the existing constraints. 
diff --git a/azure-batch/azure/batch/operations/job_schedule_operations.py b/azure-batch/azure/batch/operations/job_schedule_operations.py index b209a1915ee0..e0909b5cf47b 100644 --- a/azure-batch/azure/batch/operations/job_schedule_operations.py +++ b/azure-batch/azure/batch/operations/job_schedule_operations.py @@ -480,7 +480,7 @@ def update( self, job_schedule_id, job_schedule_update_parameter, job_schedule_update_options=None, custom_headers=None, raw=False, **operation_config): """Updates the properties of the specified job schedule. - This fully replaces all the updateable properties of the job schedule. + This fully replaces all the updatable properties of the job schedule. For example, if the schedule property is not specified with this request, then the Batch service will remove the existing schedule. Changes to a job schedule only impact jobs created by the schedule diff --git a/azure-batch/azure/batch/operations/pool_operations.py b/azure-batch/azure/batch/operations/pool_operations.py index 6d9d40f7e5ef..3792ae2113f6 100644 --- a/azure-batch/azure/batch/operations/pool_operations.py +++ b/azure-batch/azure/batch/operations/pool_operations.py @@ -1407,7 +1407,7 @@ def update_properties( self, pool_id, pool_update_properties_parameter, pool_update_properties_options=None, custom_headers=None, raw=False, **operation_config): """Updates the properties of the specified pool. - This fully replaces all the updateable properties of the pool. For + This fully replaces all the updatable properties of the pool. For example, if the pool has a start task associated with it and if start task is not specified with this request, then the Batch service will remove the existing start task. diff --git a/azure-batch/azure/batch/version.py b/azure-batch/azure/batch/version.py index 2b9421da3e1b..f24f038f478b 100644 --- a/azure-batch/azure/batch/version.py +++ b/azure-batch/azure/batch/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "5.1.1" +VERSION = "2018-08-01.7.0" diff --git a/azure-batch/build.json b/azure-batch/build.json new file mode 100644 index 000000000000..e4a1aef9625a --- /dev/null +++ b/azure-batch/build.json @@ -0,0 +1,1499 @@ +{ + "autorest": [ + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4283", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + "mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "849fe6ba18851ee6de05d6ecac55a644440ff356", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4283", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "saveSpec": 
"file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4283/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4284", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + "mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "vscode-jsonrpc": "^3.3.1" + }, + 
"bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "17a8c56ed5da179015e36ad96039598639c890a5", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4284", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4284/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + 
"name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4285", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + "mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "tslint": "^5.9.1", + "tslint-language-service": "^0.9.9", + "tslint-microsoft-contrib": "^5.0.3", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "045d7c4811a3746b847b6aab56658667e68bbac7", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4285", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "rawSpec": 
"/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4285/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4286", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + 
"mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "tslint": "^5.9.1", + "tslint-language-service": "^0.9.9", + "tslint-microsoft-contrib": "^5.0.3", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "9de22c342ada6dfd649f3d486bab7092cbb5e150", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4286", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4286/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + 
"busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4288", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + "mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "tslint": "^5.9.1", + "tslint-language-service": "^0.9.9", + "tslint-microsoft-contrib": "^5.0.3", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "e555a23ab33d4c1e51a13eff4a72aeb861c5c459", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4288", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": 
"/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4288/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4289", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": { + "typescript": "2.6.2" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + 
"@types/jsonpath": "^0.1.29", + "@types/mocha": "5.2.0", + "@types/node": "^8.0.53", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "5.2.0", + "mocha-typescript": "1.1.14", + "shx": "0.2.2", + "static-link": "^0.2.3", + "tslint": "^5.9.1", + "tslint-language-service": "^0.9.9", + "tslint-microsoft-contrib": "^5.0.3", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "652478ce1e2dad955624be6fa14907055b15dbb6", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4289", + "_from": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4289/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": 
"/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest-core", + "version": "2.0.4290", + "engines": { + "node": ">=7.10.0" + }, + "dependencies": {}, + "optionalDependencies": {}, + "devDependencies": { + "@types/commonmark": "^0.27.0", + "@types/jsonpath": "^0.1.29", + "@types/node": "10.9.4", + "@types/source-map": "0.5.0", + "@types/yargs": "^8.0.2", + "@types/z-schema": "^3.16.31", + "dts-generator": "^2.1.0", + "mocha": "^5.0.0", + "mocha-typescript": "1.1.17", + "shx": "0.2.2", + "static-link": "^0.2.3", + "tslint": "^5.9.1", + "tslint-language-service": "^0.9.9", + "tslint-microsoft-contrib": "^5.0.3", + "typescript": "^3.0.0", + "vscode-jsonrpc": "^3.3.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "_integrity": null, + "_shasum": "3ec16aca3c0c1ce3de24e2ba16bd5ac292128cf2", + "_shrinkwrap": null, + "bin": { + "autorest-core": "./dist/app.js", + "autorest-language-service": "dist/language-service/language-service.js" + }, + "_id": "@microsoft.azure/autorest-core@2.0.4290", + "_from": 
"file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "raw": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core", + "_where": "/root/.autorest/@microsoft.azure_autorest-core@2.0.4290/node_modules/@microsoft.azure/autorest-core" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.csharp", + "version": "2.3.82", + "dependencies": { + 
"dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.modeler": "2.3.55", + "@microsoft.azure/autorest.testserver": "^2.5.16", + "autorest": "^2.0.4255", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "_integrity": null, + "_shasum": "fd9876552e6bbc266aa2516cbce01f352cc470bc", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.csharp@2.3.82", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "raw": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp", + "_where": "/root/.autorest/@microsoft.azure_autorest.csharp@2.3.82/node_modules/@microsoft.azure/autorest.csharp" + }, + "extensionManager": { + 
"installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.java", + "version": "2.1.81", + "dependencies": { + "dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.testserver": "^2.5.24", + "autorest": "^2.0.4280", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "_integrity": null, + "_shasum": "98fef8c6529daf743d2ba20cd66ebe87b76de880", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.java@2.1.81", + "_from": 
"file:/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "raw": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java", + "_where": "/root/.autorest/@microsoft.azure_autorest.java@2.1.81/node_modules/@microsoft.azure/autorest.java" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.modeler", + "version": "2.3.38", + "dependencies": { + "dotnet-2.0.0": 
"^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.testserver": "2.3.1", + "autorest": "^2.0.4201", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "_integrity": null, + "_shasum": "903bb77932e4ed1b8bc3b25cc39b167143494f6c", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.modeler@2.3.38", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "raw": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler", + "_where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.38/node_modules/@microsoft.azure/autorest.modeler" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + 
"name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.modeler", + "version": "2.3.44", + "dependencies": { + "dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.testserver": "2.3.17", + "autorest": "^2.0.4225", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "_integrity": null, + "_shasum": "9b5a880a77467be33a77f002f03230d3ccc21266", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.modeler@2.3.44", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "_requested": 
{ + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "raw": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler", + "_where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.44/node_modules/@microsoft.azure/autorest.modeler" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.modeler", + "version": "2.3.55", + "dependencies": { + "dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + 
"@microsoft.azure/autorest.testserver": "2.5.16", + "autorest": "^2.0.4225", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "_integrity": null, + "_shasum": "349f5ac349dc8b1da8b47c45cae3572092cb423f", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.modeler@2.3.55", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "raw": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler", + "_where": "/root/.autorest/@microsoft.azure_autorest.modeler@2.3.55/node_modules/@microsoft.azure/autorest.modeler" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": 
"_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.python", + "version": "2.1.40", + "dependencies": { + "dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.testserver": "^2.4.0", + "autorest": "^2.0.4203", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "_integrity": null, + "_shasum": "9b3f08c892d725ac571b3a7dc8f781d76da64397", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.python@2.1.40", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "_requested": { + "type": "directory", + "where": 
"/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "raw": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python", + "_where": "/root/.autorest/@microsoft.azure_autorest.python@2.1.40/node_modules/@microsoft.azure/autorest.python" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/autorest.python", + "version": "3.0.58", + "dependencies": { + "dotnet-2.0.0": "^1.4.4" + }, + "optionalDependencies": {}, + "devDependencies": { + "@microsoft.azure/autorest.testserver": "^2.5.14", + "autorest": 
"^2.0.4203", + "coffee-script": "^1.11.1", + "dotnet-sdk-2.0.0": "^1.4.4", + "gulp": "^3.9.1", + "gulp-filter": "^5.0.0", + "gulp-line-ending-corrector": "^1.0.1", + "iced-coffee-script": "^108.0.11", + "marked": "^0.3.6", + "marked-terminal": "^2.0.0", + "moment": "^2.17.1", + "run-sequence": "*", + "shx": "^0.2.2", + "through2-parallel": "^0.1.3", + "yargs": "^8.0.2", + "yarn": "^1.0.2" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "_integrity": null, + "_shasum": "a13c02314121f49840d0a888554d04b24b8fe6c1", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/autorest.python@3.0.58", + "_from": "file:/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "raw": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "rawSpec": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "saveSpec": "file:/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "fetchSpec": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python" + }, + "_spec": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python", + "_where": "/root/.autorest/@microsoft.azure_autorest.python@3.0.58/node_modules/@microsoft.azure/autorest.python" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + 
"exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/classic-openapi-validator", + "version": "1.0.13", + "dependencies": { + "dotnet-2.0.0": "^1.1.0" + }, + "optionalDependencies": {}, + "devDependencies": { + "dotnet-sdk-2.0.0": "^1.1.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "_integrity": null, + "_shasum": "101b44d78bd4943561b9af87a5006270dad5fc66", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/classic-openapi-validator@1.0.13", + "_from": "file:/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "raw": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "rawSpec": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "saveSpec": 
"file:/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "fetchSpec": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator" + }, + "_spec": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator", + "_where": "/root/.autorest/@microsoft.azure_classic-openapi-validator@1.0.13/node_modules/@microsoft.azure/classic-openapi-validator" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "@microsoft.azure/openapi-validator", + "version": "1.0.4", + "dependencies": { + "fs": "^0.0.1-security", + "js-yaml": "^3.8.4", + "jsonpath": "^0.2.11", + "vscode-jsonrpc": "^3.2.0" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/js-yaml": "^3.5.30", + "@types/jsonpath": "^0.1.29", + "@types/node": "^7.0.18", + "gulp": "3.9.1", + "gulp-clean": "0.3.2", + "gulp-dotnet-cli": "0.4.0", + "gulp-mocha": "4.3.1", + "gulp-run": "1.7.1", + "mocha": "3.2.0", + "mocha-typescript": 
"1.0.22", + "typescript": "2.3.3" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "_integrity": null, + "_shasum": "0393bebc041aea17273e4dedee48da6586fbc4d1", + "_shrinkwrap": null, + "bin": null, + "_id": "@microsoft.azure/openapi-validator@1.0.4", + "_from": "file:/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "_requested": { + "type": "directory", + "where": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "raw": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "rawSpec": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "saveSpec": "file:/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "fetchSpec": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator" + }, + "_spec": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator", + "_where": "/root/.autorest/@microsoft.azure_openapi-validator@1.0.4/node_modules/@microsoft.azure/openapi-validator" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": 
"_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + }, + { + "resolvedInfo": null, + "packageMetadata": { + "name": "oav", + "version": "0.4.70", + "dependencies": { + "@microsoft.azure/autorest-extension-base": "1.0.13", + "@ts-common/iterator": "0.0.32", + "@ts-common/json": "0.0.14", + "@ts-common/json-parser": "0.0.3", + "@ts-common/property-set": "0.0.7", + "@ts-common/source-map": "0.0.18", + "@ts-common/string-map": "0.0.16", + "@ts-common/tuple": "0.0.0", + "@types/lodash": "^4.14.116", + "@types/request": "^2.47.1", + "azure-arm-resource": "^2.0.0-preview", + "glob": "^5.0.14", + "js-yaml": "^3.12.0", + "json-pointer": "^0.6.0", + "json-source-map": "^0.4.0", + "jsonpath": "^1.0.0", + "linq": "^3.1.0", + "lodash": "^4.17.10", + "moment": "~2.22.2", + "ms-rest": "^2.3.6", + "ms-rest-azure": "^2.5.7", + "recursive-readdir": "^2.2.2", + "request": "^2.85.0", + "swagger-parser": "^3.4.1", + "swagger-tools": "^0.10.4", + "uuid": "^3.0.1", + "vscode-jsonrpc": "^3.6.2", + "winston": "^3.0.0", + "yargs": "^6.6.0", + "yasway": "^1.0.5", + "yuml2svg": "^3.1.0" + }, + "optionalDependencies": {}, + "devDependencies": { + "@types/glob": "^5.0.35", + "@types/js-yaml": "^3.11.2", + "@types/json-pointer": "^1.0.30", + "@types/jsonpath": "^0.2.0", + "@types/mocha": "^5.2.5", + "@types/recursive-readdir": "^2.2.0", + "@types/should": "^8.1.30", + "@types/uuid": "^3.4.3", + "@types/yargs": "^11.0.0", + "mocha": "^5.2.0", + "nyc": "^11.8.0", + "should": "5.2.0", + "ts-node": "^6.0.5", + "tslint": "^5.11.0", + "typescript": "^3.0.1" + }, + "bundleDependencies": false, + "peerDependencies": {}, + "deprecated": false, + "_resolved": "/root/.autorest/oav@0.4.70/node_modules/oav", + "_integrity": null, 
+ "_shasum": "731605d4f6cce0be1824ae8e5fd383977fda0ad9", + "_shrinkwrap": null, + "bin": { + "oav": "./dist/cli.js" + }, + "_id": "oav@0.4.70", + "_from": "file:/root/.autorest/oav@0.4.70/node_modules/oav", + "_requested": { + "type": "directory", + "where": "/root/.autorest/oav@0.4.70/node_modules/oav", + "raw": "/root/.autorest/oav@0.4.70/node_modules/oav", + "rawSpec": "/root/.autorest/oav@0.4.70/node_modules/oav", + "saveSpec": "file:/root/.autorest/oav@0.4.70/node_modules/oav", + "fetchSpec": "/root/.autorest/oav@0.4.70/node_modules/oav" + }, + "_spec": "/root/.autorest/oav@0.4.70/node_modules/oav", + "_where": "/root/.autorest/oav@0.4.70/node_modules/oav" + }, + "extensionManager": { + "installationPath": "/root/.autorest", + "sharedLock": { + "name": "/root/.autorest", + "exclusiveLock": { + "name": "_root_.autorest.exclusive-lock", + "options": { + "port": 45234, + "host": "2130706813", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.exclusive-lock:45234" + }, + "busyLock": { + "name": "_root_.autorest.busy-lock", + "options": { + "port": 37199, + "host": "2130756895", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" + }, + "personalLock": { + "name": "_root_.autorest.6033.205377309443.personal-lock", + "options": { + "port": 24073, + "host": "2130710678", + "exclusive": true + }, + "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + }, + "file": "/tmp/_root_.autorest.lock" + }, + "dotnetPath": "/root/.dotnet" + }, + "installationPath": "/root/.autorest" + } + ], + "autorest_bootstrap": {} +} \ No newline at end of file From 66b72126043ea4e3643b9ba621059e547275e9ea Mon Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Tue, 11 Dec 2018 09:58:14 -0800 Subject: [PATCH 2/3] [AutoPR batch/data-plane] typo: batch/data-plane/Microsoft.Batch (#4042) * Generated from f6c37b84d2821a16f7a0861e7c21f169dee1a97d typo: batch/data-plane/Microsoft.Batch - updateable -> updatable - Double word "a" * 
Generated from f6c37b84d2821a16f7a0861e7c21f169dee1a97d typo: batch/data-plane/Microsoft.Batch - updateable -> updatable - Double word "a" --- .../azure/batch/batch_service_client.py | 20 ++- azure-batch/azure/batch/models/__init__.py | 19 ++- .../models/batch_service_client_enums.py | 15 +- azure-batch/azure/batch/models/cloud_job.py | 4 + .../azure/batch/models/cloud_job_py3.py | 6 +- azure-batch/azure/batch/models/cloud_pool.py | 2 +- .../azure/batch/models/cloud_pool_py3.py | 2 +- .../models/cloud_service_configuration.py | 25 +-- .../models/cloud_service_configuration_py3.py | 27 +-- .../azure/batch/models/compute_node.py | 10 +- .../azure/batch/models/compute_node_py3.py | 10 +- .../azure/batch/models/exit_options.py | 2 +- .../azure/batch/models/exit_options_py3.py | 2 +- .../azure/batch/models/image_reference.py | 5 +- .../azure/batch/models/image_reference_py3.py | 5 +- .../azure/batch/models/job_add_parameter.py | 4 + .../batch/models/job_add_parameter_py3.py | 6 +- .../batch/models/job_network_configuration.py | 43 +++++ .../models/job_network_configuration_py3.py | 43 +++++ .../azure/batch/models/job_release_task.py | 4 +- .../batch/models/job_release_task_py3.py | 4 +- .../azure/batch/models/job_specification.py | 4 + .../batch/models/job_specification_py3.py | 6 +- .../batch/models/network_configuration.py | 29 ++-- .../batch/models/network_configuration_py3.py | 31 ++-- azure-batch/azure/batch/models/os_disk.py | 32 ---- azure-batch/azure/batch/models/os_disk_py3.py | 32 ---- .../batch/models/pool_upgrade_os_options.py | 73 -------- .../models/pool_upgrade_os_options_py3.py | 73 -------- .../batch/models/pool_upgrade_os_parameter.py | 35 ---- .../models/pool_upgrade_os_parameter_py3.py | 35 ---- .../azure/batch/models/pool_usage_metrics.py | 12 -- .../batch/models/pool_usage_metrics_py3.py | 14 +- .../azure/batch/models/resource_file.py | 70 +++++--- .../azure/batch/models/resource_file_py3.py | 72 +++++--- 
.../azure/batch/models/task_add_parameter.py | 4 +- .../batch/models/task_add_parameter_py3.py | 4 +- .../azure/batch/models/task_constraints.py | 8 +- .../batch/models/task_constraints_py3.py | 8 +- .../azure/batch/models/user_account.py | 8 + .../azure/batch/models/user_account_py3.py | 10 +- .../azure/batch/models/user_identity.py | 5 +- .../azure/batch/models/user_identity_py3.py | 5 +- .../models/virtual_machine_configuration.py | 9 +- .../virtual_machine_configuration_py3.py | 11 +- .../models/windows_user_configuration.py | 31 ++++ .../models/windows_user_configuration_py3.py | 31 ++++ .../batch/operations/account_operations.py | 12 +- .../operations/application_operations.py | 9 +- .../operations/certificate_operations.py | 15 +- .../operations/compute_node_operations.py | 16 +- .../azure/batch/operations/file_operations.py | 12 +- .../azure/batch/operations/job_operations.py | 31 +++- .../operations/job_schedule_operations.py | 20 ++- .../azure/batch/operations/pool_operations.py | 158 ++++-------------- .../azure/batch/operations/task_operations.py | 21 ++- azure-batch/azure/batch/version.py | 2 +- azure-batch/build.json | 136 +++++++-------- 58 files changed, 620 insertions(+), 722 deletions(-) create mode 100644 azure-batch/azure/batch/models/job_network_configuration.py create mode 100644 azure-batch/azure/batch/models/job_network_configuration_py3.py delete mode 100644 azure-batch/azure/batch/models/os_disk.py delete mode 100644 azure-batch/azure/batch/models/os_disk_py3.py delete mode 100644 azure-batch/azure/batch/models/pool_upgrade_os_options.py delete mode 100644 azure-batch/azure/batch/models/pool_upgrade_os_options_py3.py delete mode 100644 azure-batch/azure/batch/models/pool_upgrade_os_parameter.py delete mode 100644 azure-batch/azure/batch/models/pool_upgrade_os_parameter_py3.py create mode 100644 azure-batch/azure/batch/models/windows_user_configuration.py create mode 100644 azure-batch/azure/batch/models/windows_user_configuration_py3.py 
diff --git a/azure-batch/azure/batch/batch_service_client.py b/azure-batch/azure/batch/batch_service_client.py index 80c19929f9cb..9a09b4bd0282 100644 --- a/azure-batch/azure/batch/batch_service_client.py +++ b/azure-batch/azure/batch/batch_service_client.py @@ -33,16 +33,18 @@ class BatchServiceClientConfiguration(AzureConfiguration): :param credentials: Credentials needed for the client to connect to Azure. :type credentials: :mod:`A msrestazure Credentials object` - :param str base_url: Service URL + :param batch_url: The base URL for all Azure Batch service requests. + :type batch_url: str """ def __init__( - self, credentials, base_url=None): + self, credentials, batch_url): if credentials is None: raise ValueError("Parameter 'credentials' must not be None.") - if not base_url: - base_url = 'https://batch.core.windows.net' + if batch_url is None: + raise ValueError("Parameter 'batch_url' must not be None.") + base_url = '{batchUrl}' super(BatchServiceClientConfiguration, self).__init__(base_url) @@ -50,6 +52,7 @@ def __init__( self.add_user_agent('Azure-SDK-For-Python') self.credentials = credentials + self.batch_url = batch_url class BatchServiceClient(SDKClient): @@ -80,17 +83,18 @@ class BatchServiceClient(SDKClient): :param credentials: Credentials needed for the client to connect to Azure. :type credentials: :mod:`A msrestazure Credentials object` - :param str base_url: Service URL + :param batch_url: The base URL for all Azure Batch service requests. 
+ :type batch_url: str """ def __init__( - self, credentials, base_url=None): + self, credentials, batch_url): - self.config = BatchServiceClientConfiguration(credentials, base_url) + self.config = BatchServiceClientConfiguration(credentials, batch_url) super(BatchServiceClient, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2018-08-01.7.0' + self.api_version = '2018-12-01.8.0' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) diff --git a/azure-batch/azure/batch/models/__init__.py b/azure-batch/azure/batch/models/__init__.py index 5ffc99a73460..ef769e49b54b 100644 --- a/azure-batch/azure/batch/models/__init__.py +++ b/azure-batch/azure/batch/models/__init__.py @@ -28,6 +28,7 @@ from .node_file_py3 import NodeFile from .schedule_py3 import Schedule from .job_constraints_py3 import JobConstraints + from .job_network_configuration_py3 import JobNetworkConfiguration from .container_registry_py3 import ContainerRegistry from .task_container_settings_py3 import TaskContainerSettings from .resource_file_py3 import ResourceFile @@ -39,6 +40,7 @@ from .auto_user_specification_py3 import AutoUserSpecification from .user_identity_py3 import UserIdentity from .linux_user_configuration_py3 import LinuxUserConfiguration + from .windows_user_configuration_py3 import WindowsUserConfiguration from .user_account_py3 import UserAccount from .task_constraints_py3 import TaskConstraints from .output_file_blob_container_destination_py3 import OutputFileBlobContainerDestination @@ -53,7 +55,6 @@ from .certificate_reference_py3 import CertificateReference from .metadata_item_py3 import MetadataItem from .cloud_service_configuration_py3 import CloudServiceConfiguration - from .os_disk_py3 import OSDisk from .windows_configuration_py3 import WindowsConfiguration from .data_disk_py3 import DataDisk from .container_configuration_py3 import 
ContainerConfiguration @@ -122,7 +123,6 @@ from .pool_evaluate_auto_scale_parameter_py3 import PoolEvaluateAutoScaleParameter from .pool_resize_parameter_py3 import PoolResizeParameter from .pool_update_properties_parameter_py3 import PoolUpdatePropertiesParameter - from .pool_upgrade_os_parameter_py3 import PoolUpgradeOSParameter from .pool_patch_parameter_py3 import PoolPatchParameter from .task_update_parameter_py3 import TaskUpdateParameter from .node_update_user_parameter_py3 import NodeUpdateUserParameter @@ -150,7 +150,6 @@ from .pool_resize_options_py3 import PoolResizeOptions from .pool_stop_resize_options_py3 import PoolStopResizeOptions from .pool_update_properties_options_py3 import PoolUpdatePropertiesOptions - from .pool_upgrade_os_options_py3 import PoolUpgradeOsOptions from .pool_remove_nodes_options_py3 import PoolRemoveNodesOptions from .account_list_node_agent_skus_options_py3 import AccountListNodeAgentSkusOptions from .account_list_pool_node_counts_options_py3 import AccountListPoolNodeCountsOptions @@ -230,6 +229,7 @@ from .node_file import NodeFile from .schedule import Schedule from .job_constraints import JobConstraints + from .job_network_configuration import JobNetworkConfiguration from .container_registry import ContainerRegistry from .task_container_settings import TaskContainerSettings from .resource_file import ResourceFile @@ -241,6 +241,7 @@ from .auto_user_specification import AutoUserSpecification from .user_identity import UserIdentity from .linux_user_configuration import LinuxUserConfiguration + from .windows_user_configuration import WindowsUserConfiguration from .user_account import UserAccount from .task_constraints import TaskConstraints from .output_file_blob_container_destination import OutputFileBlobContainerDestination @@ -255,7 +256,6 @@ from .certificate_reference import CertificateReference from .metadata_item import MetadataItem from .cloud_service_configuration import CloudServiceConfiguration - from .os_disk 
import OSDisk from .windows_configuration import WindowsConfiguration from .data_disk import DataDisk from .container_configuration import ContainerConfiguration @@ -324,7 +324,6 @@ from .pool_evaluate_auto_scale_parameter import PoolEvaluateAutoScaleParameter from .pool_resize_parameter import PoolResizeParameter from .pool_update_properties_parameter import PoolUpdatePropertiesParameter - from .pool_upgrade_os_parameter import PoolUpgradeOSParameter from .pool_patch_parameter import PoolPatchParameter from .task_update_parameter import TaskUpdateParameter from .node_update_user_parameter import NodeUpdateUserParameter @@ -352,7 +351,6 @@ from .pool_resize_options import PoolResizeOptions from .pool_stop_resize_options import PoolStopResizeOptions from .pool_update_properties_options import PoolUpdatePropertiesOptions - from .pool_upgrade_os_options import PoolUpgradeOsOptions from .pool_remove_nodes_options import PoolRemoveNodesOptions from .account_list_node_agent_skus_options import AccountListNodeAgentSkusOptions from .account_list_pool_node_counts_options import AccountListPoolNodeCountsOptions @@ -434,12 +432,14 @@ DependencyAction, AutoUserScope, ElevationLevel, + LoginMode, OutputFileUploadCondition, ComputeNodeFillType, CertificateStoreLocation, CertificateVisibility, CachingType, StorageAccountType, + DynamicVNetAssignmentScope, InboundEndpointProtocol, NetworkSecurityGroupRuleAccess, PoolLifetimeOption, @@ -485,6 +485,7 @@ 'NodeFile', 'Schedule', 'JobConstraints', + 'JobNetworkConfiguration', 'ContainerRegistry', 'TaskContainerSettings', 'ResourceFile', @@ -496,6 +497,7 @@ 'AutoUserSpecification', 'UserIdentity', 'LinuxUserConfiguration', + 'WindowsUserConfiguration', 'UserAccount', 'TaskConstraints', 'OutputFileBlobContainerDestination', @@ -510,7 +512,6 @@ 'CertificateReference', 'MetadataItem', 'CloudServiceConfiguration', - 'OSDisk', 'WindowsConfiguration', 'DataDisk', 'ContainerConfiguration', @@ -579,7 +580,6 @@ 'PoolEvaluateAutoScaleParameter', 
'PoolResizeParameter', 'PoolUpdatePropertiesParameter', - 'PoolUpgradeOSParameter', 'PoolPatchParameter', 'TaskUpdateParameter', 'NodeUpdateUserParameter', @@ -607,7 +607,6 @@ 'PoolResizeOptions', 'PoolStopResizeOptions', 'PoolUpdatePropertiesOptions', - 'PoolUpgradeOsOptions', 'PoolRemoveNodesOptions', 'AccountListNodeAgentSkusOptions', 'AccountListPoolNodeCountsOptions', @@ -688,12 +687,14 @@ 'DependencyAction', 'AutoUserScope', 'ElevationLevel', + 'LoginMode', 'OutputFileUploadCondition', 'ComputeNodeFillType', 'CertificateStoreLocation', 'CertificateVisibility', 'CachingType', 'StorageAccountType', + 'DynamicVNetAssignmentScope', 'InboundEndpointProtocol', 'NetworkSecurityGroupRuleAccess', 'PoolLifetimeOption', diff --git a/azure-batch/azure/batch/models/batch_service_client_enums.py b/azure-batch/azure/batch/models/batch_service_client_enums.py index a2c68e719280..9c04014cfe91 100644 --- a/azure-batch/azure/batch/models/batch_service_client_enums.py +++ b/azure-batch/azure/batch/models/batch_service_client_enums.py @@ -61,6 +61,12 @@ class ElevationLevel(str, Enum): admin = "admin" #: The user is a user with elevated access and operates with full Administrator permissions. +class LoginMode(str, Enum): + + batch = "batch" #: The LOGON32_LOGON_BATCH Win32 login mode. The batch login mode is recommended for long running parallel processes. + interactive = "interactive" #: The LOGON32_LOGON_INTERACTIVE Win32 login mode. UAC is enabled on Windows VirtualMachineConfiguration pools. If this option is used with an elevated user identity in a Windows VirtualMachineConfiguration pool, the user session will not be elevated unless the application executed by the task command line is configured to always require administrative privilege or to always require maximum privilege. + + class OutputFileUploadCondition(str, Enum): task_success = "tasksuccess" #: Upload the file(s) only after the task process exits with an exit code of 0. 
@@ -100,6 +106,12 @@ class StorageAccountType(str, Enum): premium_lrs = "premium_lrs" #: The data disk should use premium locally redundant storage. +class DynamicVNetAssignmentScope(str, Enum): + + none = "none" #: No dynamic VNet assignment is enabled. + job = "job" #: Dynamic VNet assignment is done per-job. + + class InboundEndpointProtocol(str, Enum): tcp = "tcp" #: Use TCP for the endpoint. @@ -178,7 +190,6 @@ class PoolState(str, Enum): active = "active" #: The pool is available to run tasks subject to the availability of compute nodes. deleting = "deleting" #: The user has requested that the pool be deleted, but the delete operation has not yet completed. - upgrading = "upgrading" #: The user has requested that the operating system of the pool's nodes be upgraded, but the upgrade operation has not yet completed (that is, some nodes in the pool have not yet been upgraded). While upgrading, the pool may be able to run tasks (with reduced capacity) but this is not guaranteed. class AllocationState(str, Enum): @@ -230,7 +241,7 @@ class ComputeNodeState(str, Enum): unknown = "unknown" #: The Batch service has lost contact with the node, and does not know its true state. leaving_pool = "leavingpool" #: The node is leaving the pool, either because the user explicitly removed it or because the pool is resizing or autoscaling down. offline = "offline" #: The node is not currently running a task, and scheduling of new tasks to the node is disabled. - preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was preempted will be rescheduled when another node becomes available. + preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was pre-empted will be rescheduled when another node becomes available. 
class SchedulingState(str, Enum): diff --git a/azure-batch/azure/batch/models/cloud_job.py b/azure-batch/azure/batch/models/cloud_job.py index 4e6cafbf48d9..374d7d0a9264 100644 --- a/azure-batch/azure/batch/models/cloud_job.py +++ b/azure-batch/azure/batch/models/cloud_job.py @@ -92,6 +92,8 @@ class CloudJob(Model): default is noaction. Possible values include: 'noAction', 'performExitOptionsJobAction' :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration :param metadata: A list of name-value pairs associated with the job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. @@ -126,6 +128,7 @@ class CloudJob(Model): 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, 'stats': {'key': 'stats', 'type': 'JobStatistics'}, @@ -153,6 +156,7 @@ def __init__(self, **kwargs): self.pool_info = kwargs.get('pool_info', None) self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) self.metadata = kwargs.get('metadata', None) self.execution_info = kwargs.get('execution_info', None) self.stats = kwargs.get('stats', None) diff --git a/azure-batch/azure/batch/models/cloud_job_py3.py b/azure-batch/azure/batch/models/cloud_job_py3.py index 226cb3bb7cc3..dd9c58164b80 100644 --- a/azure-batch/azure/batch/models/cloud_job_py3.py +++ 
b/azure-batch/azure/batch/models/cloud_job_py3.py @@ -92,6 +92,8 @@ class CloudJob(Model): default is noaction. Possible values include: 'noAction', 'performExitOptionsJobAction' :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration :param metadata: A list of name-value pairs associated with the job as metadata. The Batch service does not assign any meaning to metadata; it is solely for the use of user code. @@ -126,12 +128,13 @@ class CloudJob(Model): 'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'}, 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'execution_info': {'key': 'executionInfo', 'type': 'JobExecutionInformation'}, 'stats': {'key': 'stats', 'type': 'JobStatistics'}, } - def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: + def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependencies: bool=None, url: str=None, e_tag: str=None, last_modified=None, creation_time=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, priority: int=None, constraints=None, job_manager_task=None, 
job_preparation_task=None, job_release_task=None, common_environment_settings=None, pool_info=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, metadata=None, execution_info=None, stats=None, **kwargs) -> None: super(CloudJob, self).__init__(**kwargs) self.id = id self.display_name = display_name @@ -153,6 +156,7 @@ def __init__(self, *, id: str=None, display_name: str=None, uses_task_dependenci self.pool_info = pool_info self.on_all_tasks_complete = on_all_tasks_complete self.on_task_failure = on_task_failure + self.network_configuration = network_configuration self.metadata = metadata self.execution_info = execution_info self.stats = stats diff --git a/azure-batch/azure/batch/models/cloud_pool.py b/azure-batch/azure/batch/models/cloud_pool.py index 8d85502dba7f..58cdaa477c9e 100644 --- a/azure-batch/azure/batch/models/cloud_pool.py +++ b/azure-batch/azure/batch/models/cloud_pool.py @@ -41,7 +41,7 @@ class CloudPool(Model): :param creation_time: The creation time of the pool. :type creation_time: datetime :param state: The current state of the pool. Possible values include: - 'active', 'deleting', 'upgrading' + 'active', 'deleting' :type state: str or ~azure.batch.models.PoolState :param state_transition_time: The time at which the pool entered its current state. diff --git a/azure-batch/azure/batch/models/cloud_pool_py3.py b/azure-batch/azure/batch/models/cloud_pool_py3.py index b1166d42dc01..a4417bdd186a 100644 --- a/azure-batch/azure/batch/models/cloud_pool_py3.py +++ b/azure-batch/azure/batch/models/cloud_pool_py3.py @@ -41,7 +41,7 @@ class CloudPool(Model): :param creation_time: The creation time of the pool. :type creation_time: datetime :param state: The current state of the pool. Possible values include: - 'active', 'deleting', 'upgrading' + 'active', 'deleting' :type state: str or ~azure.batch.models.PoolState :param state_transition_time: The time at which the pool entered its current state. 
diff --git a/azure-batch/azure/batch/models/cloud_service_configuration.py b/azure-batch/azure/batch/models/cloud_service_configuration.py index d86bae4012d8..ac8f6ef15b3b 100644 --- a/azure-batch/azure/batch/models/cloud_service_configuration.py +++ b/azure-batch/azure/batch/models/cloud_service_configuration.py @@ -16,9 +16,6 @@ class CloudServiceConfiguration(Model): """The configuration for nodes in a pool based on the Azure Cloud Services platform. - Variables are only populated by the server, and will be ignored when - sending a request. - All required parameters must be populated in order to send to Azure. :param os_family: Required. The Azure Guest OS family to be installed on @@ -30,32 +27,22 @@ class CloudServiceConfiguration(Model): see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). :type os_family: str - :param target_os_version: The Azure Guest OS version to be installed on - the virtual machines in the pool. The default value is * which specifies - the latest operating system version for the specified OS family. - :type target_os_version: str - :ivar current_os_version: The Azure Guest OS Version currently installed - on the virtual machines in the pool. This may differ from targetOSVersion - if the pool state is Upgrading. In this case some virtual machines may be - on the targetOSVersion and some may be on the currentOSVersion during the - upgrade process. Once all virtual machines have upgraded, currentOSVersion - is updated to be the same as targetOSVersion. - :vartype current_os_version: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the pool. The default value is * which specifies the + latest operating system version for the specified OS family. 
+ :type os_version: str """ _validation = { 'os_family': {'required': True}, - 'current_os_version': {'readonly': True}, } _attribute_map = { 'os_family': {'key': 'osFamily', 'type': 'str'}, - 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, - 'current_os_version': {'key': 'currentOSVersion', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, } def __init__(self, **kwargs): super(CloudServiceConfiguration, self).__init__(**kwargs) self.os_family = kwargs.get('os_family', None) - self.target_os_version = kwargs.get('target_os_version', None) - self.current_os_version = None + self.os_version = kwargs.get('os_version', None) diff --git a/azure-batch/azure/batch/models/cloud_service_configuration_py3.py b/azure-batch/azure/batch/models/cloud_service_configuration_py3.py index eb53f78f23d2..10c73377e085 100644 --- a/azure-batch/azure/batch/models/cloud_service_configuration_py3.py +++ b/azure-batch/azure/batch/models/cloud_service_configuration_py3.py @@ -16,9 +16,6 @@ class CloudServiceConfiguration(Model): """The configuration for nodes in a pool based on the Azure Cloud Services platform. - Variables are only populated by the server, and will be ignored when - sending a request. - All required parameters must be populated in order to send to Azure. :param os_family: Required. The Azure Guest OS family to be installed on @@ -30,32 +27,22 @@ class CloudServiceConfiguration(Model): see Azure Guest OS Releases (https://azure.microsoft.com/documentation/articles/cloud-services-guestos-update-matrix/#releases). :type os_family: str - :param target_os_version: The Azure Guest OS version to be installed on - the virtual machines in the pool. The default value is * which specifies - the latest operating system version for the specified OS family. - :type target_os_version: str - :ivar current_os_version: The Azure Guest OS Version currently installed - on the virtual machines in the pool. 
This may differ from targetOSVersion - if the pool state is Upgrading. In this case some virtual machines may be - on the targetOSVersion and some may be on the currentOSVersion during the - upgrade process. Once all virtual machines have upgraded, currentOSVersion - is updated to be the same as targetOSVersion. - :vartype current_os_version: str + :param os_version: The Azure Guest OS version to be installed on the + virtual machines in the pool. The default value is * which specifies the + latest operating system version for the specified OS family. + :type os_version: str """ _validation = { 'os_family': {'required': True}, - 'current_os_version': {'readonly': True}, } _attribute_map = { 'os_family': {'key': 'osFamily', 'type': 'str'}, - 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, - 'current_os_version': {'key': 'currentOSVersion', 'type': 'str'}, + 'os_version': {'key': 'osVersion', 'type': 'str'}, } - def __init__(self, *, os_family: str, target_os_version: str=None, **kwargs) -> None: + def __init__(self, *, os_family: str, os_version: str=None, **kwargs) -> None: super(CloudServiceConfiguration, self).__init__(**kwargs) self.os_family = os_family - self.target_os_version = target_os_version - self.current_os_version = None + self.os_version = os_version diff --git a/azure-batch/azure/batch/models/compute_node.py b/azure-batch/azure/batch/models/compute_node.py index 691adc5bd548..0321d6e73ead 100644 --- a/azure-batch/azure/batch/models/compute_node.py +++ b/azure-batch/azure/batch/models/compute_node.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. 
Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' @@ -35,11 +35,13 @@ class ComputeNode(Model): :param state_transition_time: The time at which the compute node entered its current state. :type state_transition_time: datetime - :param last_boot_time: The time at which the compute node was started. - This property may not be present if the node state is unusable. + :param last_boot_time: The last time at which the compute node was + started. This property may not be present if the node state is unusable. :type last_boot_time: datetime :param allocation_time: The time at which this compute node was allocated - to the pool. + to the pool. This is the time when the node was initially allocated and + doesn't change once set. It is not updated when the node is service healed + or preempted. :type allocation_time: datetime :param ip_address: The IP address that other compute nodes can use to communicate with this compute node. Every node that is added to a pool is diff --git a/azure-batch/azure/batch/models/compute_node_py3.py b/azure-batch/azure/batch/models/compute_node_py3.py index 7b9c41a9c07e..dff5d069add2 100644 --- a/azure-batch/azure/batch/models/compute_node_py3.py +++ b/azure-batch/azure/batch/models/compute_node_py3.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. 
Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' @@ -35,11 +35,13 @@ class ComputeNode(Model): :param state_transition_time: The time at which the compute node entered its current state. :type state_transition_time: datetime - :param last_boot_time: The time at which the compute node was started. - This property may not be present if the node state is unusable. + :param last_boot_time: The last time at which the compute node was + started. This property may not be present if the node state is unusable. :type last_boot_time: datetime :param allocation_time: The time at which this compute node was allocated - to the pool. + to the pool. This is the time when the node was initially allocated and + doesn't change once set. It is not updated when the node is service healed + or preempted. :type allocation_time: datetime :param ip_address: The IP address that other compute nodes can use to communicate with this compute node. Every node that is added to a pool is diff --git a/azure-batch/azure/batch/models/exit_options.py b/azure-batch/azure/batch/models/exit_options.py index 3e2dee00f962..f7f5e7b10578 100644 --- a/azure-batch/azure/batch/models/exit_options.py +++ b/azure-batch/azure/batch/models/exit_options.py @@ -19,7 +19,7 @@ class ExitOptions(Model): the task completes with the given exit condition and the job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the - job's onTaskFailed property is noAction, then specifying this property + job's onTaskFailed property is noaction, then specifying this property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). 
Possible values include: 'none', 'disable', diff --git a/azure-batch/azure/batch/models/exit_options_py3.py b/azure-batch/azure/batch/models/exit_options_py3.py index ac81ee1b74fa..0867bbea15a8 100644 --- a/azure-batch/azure/batch/models/exit_options_py3.py +++ b/azure-batch/azure/batch/models/exit_options_py3.py @@ -19,7 +19,7 @@ class ExitOptions(Model): the task completes with the given exit condition and the job's onTaskFailed property is 'performExitOptionsJobAction'. The default is none for exit code 0 and terminate for all other exit conditions. If the - job's onTaskFailed property is noAction, then specifying this property + job's onTaskFailed property is noaction, then specifying this property returns an error and the add task request fails with an invalid property value error; if you are calling the REST API directly, the HTTP status code is 400 (Bad Request). Possible values include: 'none', 'disable', diff --git a/azure-batch/azure/batch/models/image_reference.py b/azure-batch/azure/batch/models/image_reference.py index 41fb7fbcc9a8..36f0cb14e852 100644 --- a/azure-batch/azure/batch/models/image_reference.py +++ b/azure-batch/azure/batch/models/image_reference.py @@ -37,9 +37,8 @@ class ImageReference(Model): /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. This property is mutually exclusive with other ImageReference properties. The virtual machine image must be in the same region and subscription as - the Azure Batch account. For information about the firewall settings for - the Batch node agent to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + the Azure Batch account. For more details, see + https://docs.microsoft.com/azure/batch/batch-custom-images. 
:type virtual_machine_image_id: str """ diff --git a/azure-batch/azure/batch/models/image_reference_py3.py b/azure-batch/azure/batch/models/image_reference_py3.py index 7471294d297b..1a40536ee2ce 100644 --- a/azure-batch/azure/batch/models/image_reference_py3.py +++ b/azure-batch/azure/batch/models/image_reference_py3.py @@ -37,9 +37,8 @@ class ImageReference(Model): /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. This property is mutually exclusive with other ImageReference properties. The virtual machine image must be in the same region and subscription as - the Azure Batch account. For information about the firewall settings for - the Batch node agent to communicate with the Batch service see - https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + the Azure Batch account. For more details, see + https://docs.microsoft.com/azure/batch/batch-custom-images. :type virtual_machine_image_id: str """ diff --git a/azure-batch/azure/batch/models/job_add_parameter.py b/azure-batch/azure/batch/models/job_add_parameter.py index f79558f4fa6d..659c69755cef 100644 --- a/azure-batch/azure/batch/models/job_add_parameter.py +++ b/azure-batch/azure/batch/models/job_add_parameter.py @@ -94,6 +94,8 @@ class JobAddParameter(Model): :param uses_task_dependencies: Whether tasks in the job can define dependencies on each other. The default is false. :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration """ _validation = { @@ -115,6 +117,7 @@ class JobAddParameter(Model): 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, } def __init__(self, **kwargs): @@ -132,3 +135,4 @@ def __init__(self, **kwargs): self.on_task_failure = kwargs.get('on_task_failure', None) self.metadata = kwargs.get('metadata', None) self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) + self.network_configuration = kwargs.get('network_configuration', None) diff --git a/azure-batch/azure/batch/models/job_add_parameter_py3.py b/azure-batch/azure/batch/models/job_add_parameter_py3.py index e09577550b8b..08c329c3f8f2 100644 --- a/azure-batch/azure/batch/models/job_add_parameter_py3.py +++ b/azure-batch/azure/batch/models/job_add_parameter_py3.py @@ -94,6 +94,8 @@ class JobAddParameter(Model): :param uses_task_dependencies: Whether tasks in the job can define dependencies on each other. The default is false. :type uses_task_dependencies: bool + :param network_configuration: The network configuration for the job. 
+ :type network_configuration: ~azure.batch.models.JobNetworkConfiguration """ _validation = { @@ -115,9 +117,10 @@ class JobAddParameter(Model): 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, } - def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, **kwargs) -> None: + def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, on_all_tasks_complete=None, on_task_failure=None, metadata=None, uses_task_dependencies: bool=None, network_configuration=None, **kwargs) -> None: super(JobAddParameter, self).__init__(**kwargs) self.id = id self.display_name = display_name @@ -132,3 +135,4 @@ def __init__(self, *, id: str, pool_info, display_name: str=None, priority: int= self.on_task_failure = on_task_failure self.metadata = metadata self.uses_task_dependencies = uses_task_dependencies + self.network_configuration = network_configuration diff --git a/azure-batch/azure/batch/models/job_network_configuration.py b/azure-batch/azure/batch/models/job_network_configuration.py new file mode 100644 index 000000000000..b7dee32c659e --- /dev/null +++ b/azure-batch/azure/batch/models/job_network_configuration.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which nodes running tasks from the job will join for the + duration of the task. This is only supported for jobs running on + VirtualMachineConfiguration pools. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes which will run tasks from the + job. For more details, see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = kwargs.get('subnet_id', None) diff --git a/azure-batch/azure/batch/models/job_network_configuration_py3.py b/azure-batch/azure/batch/models/job_network_configuration_py3.py new file mode 100644 index 000000000000..d47cbb6d576d --- /dev/null +++ b/azure-batch/azure/batch/models/job_network_configuration_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class JobNetworkConfiguration(Model): + """The network configuration for the job. + + All required parameters must be populated in order to send to Azure. + + :param subnet_id: Required. The ARM resource identifier of the virtual + network subnet which nodes running tasks from the job will join for the + duration of the task. This is only supported for jobs running on + VirtualMachineConfiguration pools. This is of the form + /subscriptions/{subscription}/resourceGroups/{group}/providers/{provider}/virtualNetworks/{network}/subnets/{subnet}. + The virtual network must be in the same region and subscription as the + Azure Batch account. The specified subnet should have enough free IP + addresses to accommodate the number of nodes which will run tasks from the + job. For more details, see + https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. 
+ :type subnet_id: str + """ + + _validation = { + 'subnet_id': {'required': True}, + } + + _attribute_map = { + 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + } + + def __init__(self, *, subnet_id: str, **kwargs) -> None: + super(JobNetworkConfiguration, self).__init__(**kwargs) + self.subnet_id = subnet_id diff --git a/azure-batch/azure/batch/models/job_release_task.py b/azure-batch/azure/batch/models/job_release_task.py index cb91dd91ebf8..32b97e5c6fb6 100644 --- a/azure-batch/azure/batch/models/job_release_task.py +++ b/azure-batch/azure/batch/models/job_release_task.py @@ -85,8 +85,8 @@ class JobReleaseTask(Model): :param retention_time: The minimum time to retain the task directory for the Job Release task on the compute node. After this time, the Batch service may delete the task directory and all its contents. The default is - infinite, i.e. the task directory will be retained until the compute node - is removed or reimaged. + 7 days, i.e. the task directory will be retained for 7 days unless the + compute node is removed or the job is deleted. :type retention_time: timedelta :param user_identity: The user identity under which the Job Release task runs. If omitted, the task runs as a non-administrative user unique to the diff --git a/azure-batch/azure/batch/models/job_release_task_py3.py b/azure-batch/azure/batch/models/job_release_task_py3.py index e8febe4c39eb..a12fbc8f42d4 100644 --- a/azure-batch/azure/batch/models/job_release_task_py3.py +++ b/azure-batch/azure/batch/models/job_release_task_py3.py @@ -85,8 +85,8 @@ class JobReleaseTask(Model): :param retention_time: The minimum time to retain the task directory for the Job Release task on the compute node. After this time, the Batch service may delete the task directory and all its contents. The default is - infinite, i.e. the task directory will be retained until the compute node - is removed or reimaged. + 7 days, i.e. 
the task directory will be retained for 7 days unless the + compute node is removed or the job is deleted. :type retention_time: timedelta :param user_identity: The user identity under which the Job Release task runs. If omitted, the task runs as a non-administrative user unique to the diff --git a/azure-batch/azure/batch/models/job_specification.py b/azure-batch/azure/batch/models/job_specification.py index 5ff5a6b0c937..ebea9ad06590 100644 --- a/azure-batch/azure/batch/models/job_specification.py +++ b/azure-batch/azure/batch/models/job_specification.py @@ -49,6 +49,8 @@ class JobSpecification(Model): resource file download error. The default is noaction. Possible values include: 'noAction', 'performExitOptionsJobAction' :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration :param constraints: The execution constraints for jobs created under this schedule. 
:type constraints: ~azure.batch.models.JobConstraints @@ -100,6 +102,7 @@ class JobSpecification(Model): 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, @@ -116,6 +119,7 @@ def __init__(self, **kwargs): self.uses_task_dependencies = kwargs.get('uses_task_dependencies', None) self.on_all_tasks_complete = kwargs.get('on_all_tasks_complete', None) self.on_task_failure = kwargs.get('on_task_failure', None) + self.network_configuration = kwargs.get('network_configuration', None) self.constraints = kwargs.get('constraints', None) self.job_manager_task = kwargs.get('job_manager_task', None) self.job_preparation_task = kwargs.get('job_preparation_task', None) diff --git a/azure-batch/azure/batch/models/job_specification_py3.py b/azure-batch/azure/batch/models/job_specification_py3.py index 09484bf88c66..2c1f2a0c9ea3 100644 --- a/azure-batch/azure/batch/models/job_specification_py3.py +++ b/azure-batch/azure/batch/models/job_specification_py3.py @@ -49,6 +49,8 @@ class JobSpecification(Model): resource file download error. The default is noaction. Possible values include: 'noAction', 'performExitOptionsJobAction' :type on_task_failure: str or ~azure.batch.models.OnTaskFailure + :param network_configuration: The network configuration for the job. + :type network_configuration: ~azure.batch.models.JobNetworkConfiguration :param constraints: The execution constraints for jobs created under this schedule. 
:type constraints: ~azure.batch.models.JobConstraints @@ -100,6 +102,7 @@ class JobSpecification(Model): 'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'}, 'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'}, 'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'}, + 'network_configuration': {'key': 'networkConfiguration', 'type': 'JobNetworkConfiguration'}, 'constraints': {'key': 'constraints', 'type': 'JobConstraints'}, 'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'}, 'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'}, @@ -109,13 +112,14 @@ class JobSpecification(Model): 'metadata': {'key': 'metadata', 'type': '[MetadataItem]'}, } - def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: + def __init__(self, *, pool_info, priority: int=None, display_name: str=None, uses_task_dependencies: bool=None, on_all_tasks_complete=None, on_task_failure=None, network_configuration=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None, **kwargs) -> None: super(JobSpecification, self).__init__(**kwargs) self.priority = priority self.display_name = display_name self.uses_task_dependencies = uses_task_dependencies self.on_all_tasks_complete = on_all_tasks_complete self.on_task_failure = on_task_failure + self.network_configuration = network_configuration self.constraints = constraints self.job_manager_task = job_manager_task self.job_preparation_task = job_preparation_task diff --git a/azure-batch/azure/batch/models/network_configuration.py b/azure-batch/azure/batch/models/network_configuration.py index 
2bb54a8e3f64..f3f28019f62d 100644 --- a/azure-batch/azure/batch/models/network_configuration.py +++ b/azure-batch/azure/batch/models/network_configuration.py @@ -22,26 +22,17 @@ class NetworkConfiguration(Model): Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. If the subnet doesn't have enough free IP addresses, the pool will partially allocate - compute nodes, and a resize error will occur. The 'MicrosoftAzureBatch' - service principal must have the 'Classic Virtual Machine Contributor' - Role-Based Access Control (RBAC) role for the specified VNet. The - specified subnet must allow communication from the Azure Batch service to - be able to schedule tasks on the compute nodes. This can be verified by - checking if the specified VNet has any associated Network Security Groups - (NSG). If communication to the compute nodes in the specified subnet is - denied by an NSG, then the Batch service will set the state of the compute - nodes to unusable. For pools created with virtualMachineConfiguration only - ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, - but for pools created with cloudServiceConfiguration both ARM and classic - virtual networks are supported. If the specified VNet has any associated - Network Security Groups (NSG), then a few reserved system ports must be - enabled for inbound communication. For pools created with a virtual - machine configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. For pools created with a cloud - service configuration, enable ports 10100, 20100, and 30100. Also enable - outbound connections to Azure Storage on port 443. For more details see: + compute nodes, and a resize error will occur. 
For pools created with + virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported, but for pools created + with cloudServiceConfiguration both ARM and classic virtual networks are + supported. For more details, see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope :param endpoint_configuration: The configuration for endpoints on compute nodes in the Batch pool. Pool endpoint configuration is only supported on pools with the virtualMachineConfiguration property. @@ -51,10 +42,12 @@ class NetworkConfiguration(Model): _attribute_map = { 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, } def __init__(self, **kwargs): super(NetworkConfiguration, self).__init__(**kwargs) self.subnet_id = kwargs.get('subnet_id', None) + self.dynamic_vnet_assignment_scope = kwargs.get('dynamic_vnet_assignment_scope', None) self.endpoint_configuration = kwargs.get('endpoint_configuration', None) diff --git a/azure-batch/azure/batch/models/network_configuration_py3.py b/azure-batch/azure/batch/models/network_configuration_py3.py index 475d1adcabda..3af5d030eba9 100644 --- a/azure-batch/azure/batch/models/network_configuration_py3.py +++ b/azure-batch/azure/batch/models/network_configuration_py3.py @@ -22,26 +22,17 @@ class NetworkConfiguration(Model): Azure Batch account. The specified subnet should have enough free IP addresses to accommodate the number of nodes in the pool. 
If the subnet doesn't have enough free IP addresses, the pool will partially allocate - compute nodes, and a resize error will occur. The 'MicrosoftAzureBatch' - service principal must have the 'Classic Virtual Machine Contributor' - Role-Based Access Control (RBAC) role for the specified VNet. The - specified subnet must allow communication from the Azure Batch service to - be able to schedule tasks on the compute nodes. This can be verified by - checking if the specified VNet has any associated Network Security Groups - (NSG). If communication to the compute nodes in the specified subnet is - denied by an NSG, then the Batch service will set the state of the compute - nodes to unusable. For pools created with virtualMachineConfiguration only - ARM virtual networks ('Microsoft.Network/virtualNetworks') are supported, - but for pools created with cloudServiceConfiguration both ARM and classic - virtual networks are supported. If the specified VNet has any associated - Network Security Groups (NSG), then a few reserved system ports must be - enabled for inbound communication. For pools created with a virtual - machine configuration, enable ports 29876 and 29877, as well as port 22 - for Linux and port 3389 for Windows. For pools created with a cloud - service configuration, enable ports 10100, 20100, and 30100. Also enable - outbound connections to Azure Storage on port 443. For more details see: + compute nodes, and a resize error will occur. For pools created with + virtualMachineConfiguration only ARM virtual networks + ('Microsoft.Network/virtualNetworks') are supported, but for pools created + with cloudServiceConfiguration both ARM and classic virtual networks are + supported. For more details, see: https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration :type subnet_id: str + :param dynamic_vnet_assignment_scope: The scope of dynamic vnet + assignment. 
Possible values include: 'none', 'job' + :type dynamic_vnet_assignment_scope: str or + ~azure.batch.models.DynamicVNetAssignmentScope :param endpoint_configuration: The configuration for endpoints on compute nodes in the Batch pool. Pool endpoint configuration is only supported on pools with the virtualMachineConfiguration property. @@ -51,10 +42,12 @@ class NetworkConfiguration(Model): _attribute_map = { 'subnet_id': {'key': 'subnetId', 'type': 'str'}, + 'dynamic_vnet_assignment_scope': {'key': 'dynamicVNetAssignmentScope', 'type': 'DynamicVNetAssignmentScope'}, 'endpoint_configuration': {'key': 'endpointConfiguration', 'type': 'PoolEndpointConfiguration'}, } - def __init__(self, *, subnet_id: str=None, endpoint_configuration=None, **kwargs) -> None: + def __init__(self, *, subnet_id: str=None, dynamic_vnet_assignment_scope=None, endpoint_configuration=None, **kwargs) -> None: super(NetworkConfiguration, self).__init__(**kwargs) self.subnet_id = subnet_id + self.dynamic_vnet_assignment_scope = dynamic_vnet_assignment_scope self.endpoint_configuration = endpoint_configuration diff --git a/azure-batch/azure/batch/models/os_disk.py b/azure-batch/azure/batch/models/os_disk.py deleted file mode 100644 index 08916fa20048..000000000000 --- a/azure-batch/azure/batch/models/os_disk.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class OSDisk(Model): - """Settings for the operating system disk of the virtual machine. 
- - :param caching: The type of caching to enable for the OS disk. The default - value for caching is readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. - Possible values include: 'none', 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - """ - - _attribute_map = { - 'caching': {'key': 'caching', 'type': 'CachingType'}, - } - - def __init__(self, **kwargs): - super(OSDisk, self).__init__(**kwargs) - self.caching = kwargs.get('caching', None) diff --git a/azure-batch/azure/batch/models/os_disk_py3.py b/azure-batch/azure/batch/models/os_disk_py3.py deleted file mode 100644 index 4780c4fa1443..000000000000 --- a/azure-batch/azure/batch/models/os_disk_py3.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class OSDisk(Model): - """Settings for the operating system disk of the virtual machine. - - :param caching: The type of caching to enable for the OS disk. The default - value for caching is readwrite. For information about the caching options - see: - https://blogs.msdn.microsoft.com/windowsazurestorage/2012/06/27/exploring-windows-azure-drives-disks-and-images/. 
- Possible values include: 'none', 'readOnly', 'readWrite' - :type caching: str or ~azure.batch.models.CachingType - """ - - _attribute_map = { - 'caching': {'key': 'caching', 'type': 'CachingType'}, - } - - def __init__(self, *, caching=None, **kwargs) -> None: - super(OSDisk, self).__init__(**kwargs) - self.caching = caching diff --git a/azure-batch/azure/batch/models/pool_upgrade_os_options.py b/azure-batch/azure/batch/models/pool_upgrade_os_options.py deleted file mode 100644 index fbdfdf91e457..000000000000 --- a/azure-batch/azure/batch/models/pool_upgrade_os_options.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PoolUpgradeOsOptions(Model): - """Additional parameters for upgrade_os operation. - - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. 
- :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, **kwargs): - super(PoolUpgradeOsOptions, self).__init__(**kwargs) - self.timeout = kwargs.get('timeout', 30) - self.client_request_id = kwargs.get('client_request_id', None) - self.return_client_request_id = kwargs.get('return_client_request_id', False) - self.ocp_date = kwargs.get('ocp_date', None) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - self.if_modified_since = kwargs.get('if_modified_since', None) - self.if_unmodified_since = kwargs.get('if_unmodified_since', None) diff --git a/azure-batch/azure/batch/models/pool_upgrade_os_options_py3.py b/azure-batch/azure/batch/models/pool_upgrade_os_options_py3.py deleted file mode 100644 index 67884745c1c4..000000000000 --- a/azure-batch/azure/batch/models/pool_upgrade_os_options_py3.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PoolUpgradeOsOptions(Model): - """Additional parameters for upgrade_os operation. 
- - :param timeout: The maximum time that the server can spend processing the - request, in seconds. The default is 30 seconds. Default value: 30 . - :type timeout: int - :param client_request_id: The caller-generated request identity, in the - form of a GUID with no decoration such as curly braces, e.g. - 9C4D50EE-2D56-4CD3-8152-34347DC9F2B0. - :type client_request_id: str - :param return_client_request_id: Whether the server should return the - client-request-id in the response. Default value: False . - :type return_client_request_id: bool - :param ocp_date: The time the request was issued. Client libraries - typically set this to the current system clock time; set it explicitly if - you are calling the REST API directly. - :type ocp_date: datetime - :param if_match: An ETag value associated with the version of the resource - known to the client. The operation will be performed only if the - resource's current ETag on the service exactly matches the value specified - by the client. - :type if_match: str - :param if_none_match: An ETag value associated with the version of the - resource known to the client. The operation will be performed only if the - resource's current ETag on the service does not match the value specified - by the client. - :type if_none_match: str - :param if_modified_since: A timestamp indicating the last modified time of - the resource known to the client. The operation will be performed only if - the resource on the service has been modified since the specified time. - :type if_modified_since: datetime - :param if_unmodified_since: A timestamp indicating the last modified time - of the resource known to the client. The operation will be performed only - if the resource on the service has not been modified since the specified - time. 
- :type if_unmodified_since: datetime - """ - - _attribute_map = { - 'timeout': {'key': '', 'type': 'int'}, - 'client_request_id': {'key': '', 'type': 'str'}, - 'return_client_request_id': {'key': '', 'type': 'bool'}, - 'ocp_date': {'key': '', 'type': 'rfc-1123'}, - 'if_match': {'key': '', 'type': 'str'}, - 'if_none_match': {'key': '', 'type': 'str'}, - 'if_modified_since': {'key': '', 'type': 'rfc-1123'}, - 'if_unmodified_since': {'key': '', 'type': 'rfc-1123'}, - } - - def __init__(self, *, timeout: int=30, client_request_id: str=None, return_client_request_id: bool=False, ocp_date=None, if_match: str=None, if_none_match: str=None, if_modified_since=None, if_unmodified_since=None, **kwargs) -> None: - super(PoolUpgradeOsOptions, self).__init__(**kwargs) - self.timeout = timeout - self.client_request_id = client_request_id - self.return_client_request_id = return_client_request_id - self.ocp_date = ocp_date - self.if_match = if_match - self.if_none_match = if_none_match - self.if_modified_since = if_modified_since - self.if_unmodified_since = if_unmodified_since diff --git a/azure-batch/azure/batch/models/pool_upgrade_os_parameter.py b/azure-batch/azure/batch/models/pool_upgrade_os_parameter.py deleted file mode 100644 index 17141ac0e652..000000000000 --- a/azure-batch/azure/batch/models/pool_upgrade_os_parameter.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
-# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PoolUpgradeOSParameter(Model): - """Options for upgrading the operating system of compute nodes in a pool. - - All required parameters must be populated in order to send to Azure. - - :param target_os_version: Required. The Azure Guest OS version to be - installed on the virtual machines in the pool. - :type target_os_version: str - """ - - _validation = { - 'target_os_version': {'required': True}, - } - - _attribute_map = { - 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(PoolUpgradeOSParameter, self).__init__(**kwargs) - self.target_os_version = kwargs.get('target_os_version', None) diff --git a/azure-batch/azure/batch/models/pool_upgrade_os_parameter_py3.py b/azure-batch/azure/batch/models/pool_upgrade_os_parameter_py3.py deleted file mode 100644 index de2169d028da..000000000000 --- a/azure-batch/azure/batch/models/pool_upgrade_os_parameter_py3.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. -# -------------------------------------------------------------------------- - -from msrest.serialization import Model - - -class PoolUpgradeOSParameter(Model): - """Options for upgrading the operating system of compute nodes in a pool. - - All required parameters must be populated in order to send to Azure. - - :param target_os_version: Required. The Azure Guest OS version to be - installed on the virtual machines in the pool. 
- :type target_os_version: str - """ - - _validation = { - 'target_os_version': {'required': True}, - } - - _attribute_map = { - 'target_os_version': {'key': 'targetOSVersion', 'type': 'str'}, - } - - def __init__(self, *, target_os_version: str, **kwargs) -> None: - super(PoolUpgradeOSParameter, self).__init__(**kwargs) - self.target_os_version = target_os_version diff --git a/azure-batch/azure/batch/models/pool_usage_metrics.py b/azure-batch/azure/batch/models/pool_usage_metrics.py index 93cfa03f6d8f..88153c67e20b 100644 --- a/azure-batch/azure/batch/models/pool_usage_metrics.py +++ b/azure-batch/azure/batch/models/pool_usage_metrics.py @@ -35,12 +35,6 @@ class PoolUsageMetrics(Model): :param total_core_hours: Required. The total core hours used in the pool during this aggregation interval. :type total_core_hours: float - :param data_ingress_gi_b: Required. The cross data center network ingress - to the pool during this interval, in GiB. - :type data_ingress_gi_b: float - :param data_egress_gi_b: Required. The cross data center network egress - from the pool during this interval, in GiB. 
- :type data_egress_gi_b: float """ _validation = { @@ -49,8 +43,6 @@ class PoolUsageMetrics(Model): 'end_time': {'required': True}, 'vm_size': {'required': True}, 'total_core_hours': {'required': True}, - 'data_ingress_gi_b': {'required': True}, - 'data_egress_gi_b': {'required': True}, } _attribute_map = { @@ -59,8 +51,6 @@ class PoolUsageMetrics(Model): 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'vm_size': {'key': 'vmSize', 'type': 'str'}, 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, - 'data_ingress_gi_b': {'key': 'dataIngressGiB', 'type': 'float'}, - 'data_egress_gi_b': {'key': 'dataEgressGiB', 'type': 'float'}, } def __init__(self, **kwargs): @@ -70,5 +60,3 @@ def __init__(self, **kwargs): self.end_time = kwargs.get('end_time', None) self.vm_size = kwargs.get('vm_size', None) self.total_core_hours = kwargs.get('total_core_hours', None) - self.data_ingress_gi_b = kwargs.get('data_ingress_gi_b', None) - self.data_egress_gi_b = kwargs.get('data_egress_gi_b', None) diff --git a/azure-batch/azure/batch/models/pool_usage_metrics_py3.py b/azure-batch/azure/batch/models/pool_usage_metrics_py3.py index 5c7ea9ebdf73..963246fcfb34 100644 --- a/azure-batch/azure/batch/models/pool_usage_metrics_py3.py +++ b/azure-batch/azure/batch/models/pool_usage_metrics_py3.py @@ -35,12 +35,6 @@ class PoolUsageMetrics(Model): :param total_core_hours: Required. The total core hours used in the pool during this aggregation interval. :type total_core_hours: float - :param data_ingress_gi_b: Required. The cross data center network ingress - to the pool during this interval, in GiB. - :type data_ingress_gi_b: float - :param data_egress_gi_b: Required. The cross data center network egress - from the pool during this interval, in GiB. 
- :type data_egress_gi_b: float """ _validation = { @@ -49,8 +43,6 @@ class PoolUsageMetrics(Model): 'end_time': {'required': True}, 'vm_size': {'required': True}, 'total_core_hours': {'required': True}, - 'data_ingress_gi_b': {'required': True}, - 'data_egress_gi_b': {'required': True}, } _attribute_map = { @@ -59,16 +51,12 @@ class PoolUsageMetrics(Model): 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'vm_size': {'key': 'vmSize', 'type': 'str'}, 'total_core_hours': {'key': 'totalCoreHours', 'type': 'float'}, - 'data_ingress_gi_b': {'key': 'dataIngressGiB', 'type': 'float'}, - 'data_egress_gi_b': {'key': 'dataEgressGiB', 'type': 'float'}, } - def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, data_ingress_gi_b: float, data_egress_gi_b: float, **kwargs) -> None: + def __init__(self, *, pool_id: str, start_time, end_time, vm_size: str, total_core_hours: float, **kwargs) -> None: super(PoolUsageMetrics, self).__init__(**kwargs) self.pool_id = pool_id self.start_time = start_time self.end_time = end_time self.vm_size = vm_size self.total_core_hours = total_core_hours - self.data_ingress_gi_b = data_ingress_gi_b - self.data_egress_gi_b = data_egress_gi_b diff --git a/azure-batch/azure/batch/models/resource_file.py b/azure-batch/azure/batch/models/resource_file.py index f7a04764b4a3..7fdaffb3690c 100644 --- a/azure-batch/azure/batch/models/resource_file.py +++ b/azure-batch/azure/batch/models/resource_file.py @@ -13,19 +13,50 @@ class ResourceFile(Model): - """A file to be downloaded from Azure blob storage to a compute node. + """A single file or multiple files to be downloaded to a compute node. - All required parameters must be populated in order to send to Azure. - - :param blob_source: Required. The URL of the file within Azure Blob - Storage. This URL must be readable using anonymous access; that is, the - Batch service does not present any credentials when downloading the blob. 
- There are two ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the blob, or - set the ACL for the blob or its container to allow public access. - :type blob_source: str - :param file_path: Required. The location on the compute node to which to - download the file, relative to the task's working directory. + :param auto_storage_container_name: The storage container name in the auto + storage account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read permissions on the container, or set the ACL for the + container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. 
+ :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the compute node to which to download + the file(s), relative to the task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the task's working directory (for example by + using '..'). :type file_path: str :param file_mode: The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux compute nodes. 
It @@ -35,19 +66,20 @@ class ResourceFile(Model): :type file_mode: str """ - _validation = { - 'blob_source': {'required': True}, - 'file_path': {'required': True}, - } - _attribute_map = { - 'blob_source': {'key': 'blobSource', 'type': 'str'}, + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, 'file_path': {'key': 'filePath', 'type': 'str'}, 'file_mode': {'key': 'fileMode', 'type': 'str'}, } def __init__(self, **kwargs): super(ResourceFile, self).__init__(**kwargs) - self.blob_source = kwargs.get('blob_source', None) + self.auto_storage_container_name = kwargs.get('auto_storage_container_name', None) + self.storage_container_url = kwargs.get('storage_container_url', None) + self.http_url = kwargs.get('http_url', None) + self.blob_prefix = kwargs.get('blob_prefix', None) self.file_path = kwargs.get('file_path', None) self.file_mode = kwargs.get('file_mode', None) diff --git a/azure-batch/azure/batch/models/resource_file_py3.py b/azure-batch/azure/batch/models/resource_file_py3.py index b7c792e7b587..3a494085865f 100644 --- a/azure-batch/azure/batch/models/resource_file_py3.py +++ b/azure-batch/azure/batch/models/resource_file_py3.py @@ -13,19 +13,50 @@ class ResourceFile(Model): - """A file to be downloaded from Azure blob storage to a compute node. + """A single file or multiple files to be downloaded to a compute node. - All required parameters must be populated in order to send to Azure. - - :param blob_source: Required. The URL of the file within Azure Blob - Storage. This URL must be readable using anonymous access; that is, the - Batch service does not present any credentials when downloading the blob. 
- There are two ways to get such a URL for a blob in Azure storage: include - a Shared Access Signature (SAS) granting read permissions on the blob, or - set the ACL for the blob or its container to allow public access. - :type blob_source: str - :param file_path: Required. The location on the compute node to which to - download the file, relative to the task's working directory. + :param auto_storage_container_name: The storage container name in the auto + storage account. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. + :type auto_storage_container_name: str + :param storage_container_url: The URL of the blob container within Azure + Blob Storage. The autoStorageContainerName, storageContainerUrl and + httpUrl properties are mutually exclusive and one of them must be + specified. This URL must be readable and listable using anonymous access; + that is, the Batch service does not present any credentials when + downloading blobs from the container. There are two ways to get such a URL + for a container in Azure storage: include a Shared Access Signature (SAS) + granting read permissions on the container, or set the ACL for the + container to allow public access. + :type storage_container_url: str + :param http_url: The URL of the file to download. The + autoStorageContainerName, storageContainerUrl and httpUrl properties are + mutually exclusive and one of them must be specified. If the URL points to + Azure Blob Storage, it must be readable using anonymous access; that is, + the Batch service does not present any credentials when downloading the + blob. There are two ways to get such a URL for a blob in Azure storage: + include a Shared Access Signature (SAS) granting read permissions on the + blob, or set the ACL for the blob or its container to allow public access. 
+ :type http_url: str + :param blob_prefix: The blob prefix to use when downloading blobs from an + Azure Storage container. Only the blobs whose names begin with the + specified prefix will be downloaded. The property is valid only when + autoStorageContainerName or storageContainerUrl is used. This prefix can + be a partial filename or a subdirectory. If a prefix is not specified, all + the files in the container will be downloaded. + :type blob_prefix: str + :param file_path: The location on the compute node to which to download + the file(s), relative to the task's working directory. If the httpUrl + property is specified, the filePath is required and describes the path + which the file will be downloaded to, including the filename. Otherwise, + if the autoStorageContainerName or storageContainerUrl property is + specified, filePath is optional and is the directory to download the files + to. In the case where filePath is used as a directory, any directory + structure already associated with the input data will be retained in full + and appended to the specified filePath directory. The specified relative + path cannot break out of the task's working directory (for example by + using '..'). :type file_path: str :param file_mode: The file permission mode attribute in octal format. This property applies only to files being downloaded to Linux compute nodes. 
It @@ -35,19 +66,20 @@ class ResourceFile(Model): :type file_mode: str """ - _validation = { - 'blob_source': {'required': True}, - 'file_path': {'required': True}, - } - _attribute_map = { - 'blob_source': {'key': 'blobSource', 'type': 'str'}, + 'auto_storage_container_name': {'key': 'autoStorageContainerName', 'type': 'str'}, + 'storage_container_url': {'key': 'storageContainerUrl', 'type': 'str'}, + 'http_url': {'key': 'httpUrl', 'type': 'str'}, + 'blob_prefix': {'key': 'blobPrefix', 'type': 'str'}, 'file_path': {'key': 'filePath', 'type': 'str'}, 'file_mode': {'key': 'fileMode', 'type': 'str'}, } - def __init__(self, *, blob_source: str, file_path: str, file_mode: str=None, **kwargs) -> None: + def __init__(self, *, auto_storage_container_name: str=None, storage_container_url: str=None, http_url: str=None, blob_prefix: str=None, file_path: str=None, file_mode: str=None, **kwargs) -> None: super(ResourceFile, self).__init__(**kwargs) - self.blob_source = blob_source + self.auto_storage_container_name = auto_storage_container_name + self.storage_container_url = storage_container_url + self.http_url = http_url + self.blob_prefix = blob_prefix self.file_path = file_path self.file_mode = file_mode diff --git a/azure-batch/azure/batch/models/task_add_parameter.py b/azure-batch/azure/batch/models/task_add_parameter.py index c0ac69877a6d..a509aa63ecd9 100644 --- a/azure-batch/azure/batch/models/task_add_parameter.py +++ b/azure-batch/azure/batch/models/task_add_parameter.py @@ -85,8 +85,8 @@ class TaskAddParameter(Model): :type affinity_info: ~azure.batch.models.AffinityInformation :param constraints: The execution constraints that apply to this task. If you do not specify constraints, the maxTaskRetryCount is the - maxTaskRetryCount specified for the job, and the maxWallClockTime and - retentionTime are infinite. + maxTaskRetryCount specified for the job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. 
:type constraints: ~azure.batch.models.TaskConstraints :param user_identity: The user identity under which the task runs. If omitted, the task runs as a non-administrative user unique to the task. diff --git a/azure-batch/azure/batch/models/task_add_parameter_py3.py b/azure-batch/azure/batch/models/task_add_parameter_py3.py index 31f571e71d31..0e72123d6678 100644 --- a/azure-batch/azure/batch/models/task_add_parameter_py3.py +++ b/azure-batch/azure/batch/models/task_add_parameter_py3.py @@ -85,8 +85,8 @@ class TaskAddParameter(Model): :type affinity_info: ~azure.batch.models.AffinityInformation :param constraints: The execution constraints that apply to this task. If you do not specify constraints, the maxTaskRetryCount is the - maxTaskRetryCount specified for the job, and the maxWallClockTime and - retentionTime are infinite. + maxTaskRetryCount specified for the job, the maxWallClockTime is infinite, + and the retentionTime is 7 days. :type constraints: ~azure.batch.models.TaskConstraints :param user_identity: The user identity under which the task runs. If omitted, the task runs as a non-administrative user unique to the task. diff --git a/azure-batch/azure/batch/models/task_constraints.py b/azure-batch/azure/batch/models/task_constraints.py index 22898fad6ff2..98ad92ccb414 100644 --- a/azure-batch/azure/batch/models/task_constraints.py +++ b/azure-batch/azure/batch/models/task_constraints.py @@ -23,8 +23,8 @@ class TaskConstraints(Model): :param retention_time: The minimum time to retain the task directory on the compute node where it ran, from the time it completes execution. After this time, the Batch service may delete the task directory and all its - contents. The default is infinite, i.e. the task directory will be - retained until the compute node is removed or reimaged. + contents. The default is 7 days, i.e. the task directory will be retained + for 7 days unless the compute node is removed or the job is deleted. 
:type retention_time: timedelta :param max_task_retry_count: The maximum number of times the task may be retried. The Batch service retries a task if its exit code is nonzero. @@ -34,9 +34,7 @@ class TaskConstraints(Model): maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task after the first attempt. If the maximum retry - count is -1, the Batch service retries the task without limit. Resource - files and application packages are only downloaded again if the task is - retried on a new compute node. + count is -1, the Batch service retries the task without limit. :type max_task_retry_count: int """ diff --git a/azure-batch/azure/batch/models/task_constraints_py3.py b/azure-batch/azure/batch/models/task_constraints_py3.py index 2070d09669c6..db8da126b5ab 100644 --- a/azure-batch/azure/batch/models/task_constraints_py3.py +++ b/azure-batch/azure/batch/models/task_constraints_py3.py @@ -23,8 +23,8 @@ class TaskConstraints(Model): :param retention_time: The minimum time to retain the task directory on the compute node where it ran, from the time it completes execution. After this time, the Batch service may delete the task directory and all its - contents. The default is infinite, i.e. the task directory will be - retained until the compute node is removed or reimaged. + contents. The default is 7 days, i.e. the task directory will be retained + for 7 days unless the compute node is removed or the job is deleted. :type retention_time: timedelta :param max_task_retry_count: The maximum number of times the task may be retried. The Batch service retries a task if its exit code is nonzero. @@ -34,9 +34,7 @@ class TaskConstraints(Model): maximum retry count is 3, Batch tries the task up to 4 times (one initial try and 3 retries). If the maximum retry count is 0, the Batch service does not retry the task after the first attempt. 
If the maximum retry - count is -1, the Batch service retries the task without limit. Resource - files and application packages are only downloaded again if the task is - retried on a new compute node. + count is -1, the Batch service retries the task without limit. :type max_task_retry_count: int """ diff --git a/azure-batch/azure/batch/models/user_account.py b/azure-batch/azure/batch/models/user_account.py index e630e5ec5df0..419ff6149b61 100644 --- a/azure-batch/azure/batch/models/user_account.py +++ b/azure-batch/azure/batch/models/user_account.py @@ -29,6 +29,12 @@ class UserAccount(Model): the user account. This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options. :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user account. This property can only be specified if the user is + on a Windows pool. If not specified and on a Windows pool, the user is + created with the default options. 
+ :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration """ _validation = { @@ -41,6 +47,7 @@ class UserAccount(Model): 'password': {'key': 'password', 'type': 'str'}, 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, } def __init__(self, **kwargs): @@ -49,3 +56,4 @@ def __init__(self, **kwargs): self.password = kwargs.get('password', None) self.elevation_level = kwargs.get('elevation_level', None) self.linux_user_configuration = kwargs.get('linux_user_configuration', None) + self.windows_user_configuration = kwargs.get('windows_user_configuration', None) diff --git a/azure-batch/azure/batch/models/user_account_py3.py b/azure-batch/azure/batch/models/user_account_py3.py index 33a2d3690064..875bdb945ee5 100644 --- a/azure-batch/azure/batch/models/user_account_py3.py +++ b/azure-batch/azure/batch/models/user_account_py3.py @@ -29,6 +29,12 @@ class UserAccount(Model): the user account. This property is ignored if specified on a Windows pool. If not specified, the user is created with the default options. :type linux_user_configuration: ~azure.batch.models.LinuxUserConfiguration + :param windows_user_configuration: The Windows-specific user configuration + for the user account. This property can only be specified if the user is + on a Windows pool. If not specified and on a Windows pool, the user is + created with the default options. 
+ :type windows_user_configuration: + ~azure.batch.models.WindowsUserConfiguration """ _validation = { @@ -41,11 +47,13 @@ class UserAccount(Model): 'password': {'key': 'password', 'type': 'str'}, 'elevation_level': {'key': 'elevationLevel', 'type': 'ElevationLevel'}, 'linux_user_configuration': {'key': 'linuxUserConfiguration', 'type': 'LinuxUserConfiguration'}, + 'windows_user_configuration': {'key': 'windowsUserConfiguration', 'type': 'WindowsUserConfiguration'}, } - def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, **kwargs) -> None: + def __init__(self, *, name: str, password: str, elevation_level=None, linux_user_configuration=None, windows_user_configuration=None, **kwargs) -> None: super(UserAccount, self).__init__(**kwargs) self.name = name self.password = password self.elevation_level = elevation_level self.linux_user_configuration = linux_user_configuration + self.windows_user_configuration = windows_user_configuration diff --git a/azure-batch/azure/batch/models/user_identity.py b/azure-batch/azure/batch/models/user_identity.py index b75dfd7306b3..1301c24e4418 100644 --- a/azure-batch/azure/batch/models/user_identity.py +++ b/azure-batch/azure/batch/models/user_identity.py @@ -15,10 +15,7 @@ class UserIdentity(Model): """The definition of the user identity under which the task is run. - Specify either the userName or autoUser property, but not both. On - CloudServiceConfiguration pools, this user is logged in with the - INTERACTIVE flag. On Windows VirtualMachineConfiguration pools, this user - is logged in with the BATCH flag. + Specify either the userName or autoUser property, but not both. :param user_name: The name of the user identity under which the task is run. 
The userName and autoUser properties are mutually exclusive; you must diff --git a/azure-batch/azure/batch/models/user_identity_py3.py b/azure-batch/azure/batch/models/user_identity_py3.py index e566f58cd51c..379928526110 100644 --- a/azure-batch/azure/batch/models/user_identity_py3.py +++ b/azure-batch/azure/batch/models/user_identity_py3.py @@ -15,10 +15,7 @@ class UserIdentity(Model): """The definition of the user identity under which the task is run. - Specify either the userName or autoUser property, but not both. On - CloudServiceConfiguration pools, this user is logged in with the - INTERACTIVE flag. On Windows VirtualMachineConfiguration pools, this user - is logged in with the BATCH flag. + Specify either the userName or autoUser property, but not both. :param user_name: The name of the user identity under which the task is run. The userName and autoUser properties are mutually exclusive; you must diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration.py b/azure-batch/azure/batch/models/virtual_machine_configuration.py index ebf7f7d6b817..12b5714b4e63 100644 --- a/azure-batch/azure/batch/models/virtual_machine_configuration.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration.py @@ -21,9 +21,6 @@ class VirtualMachineConfiguration(Model): :param image_reference: Required. A reference to the Azure Virtual Machines Marketplace image or the custom Virtual Machine image to use. :type image_reference: ~azure.batch.models.ImageReference - :param os_disk: Settings for the operating system disk of the Virtual - Machine. - :type os_disk: ~azure.batch.models.OSDisk :param node_agent_sku_id: Required. The SKU of the Batch node agent to be provisioned on compute nodes in the pool. 
The Batch node agent is a program that runs on each node in the pool, and provides the @@ -36,10 +33,10 @@ class VirtualMachineConfiguration(Model): :type node_agent_sku_id: str :param windows_configuration: Windows operating system settings on the virtual machine. This property must not be specified if the imageReference - or osDisk property specifies a Linux OS image. + property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - compute nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. @@ -73,7 +70,6 @@ class VirtualMachineConfiguration(Model): _attribute_map = { 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, @@ -84,7 +80,6 @@ class VirtualMachineConfiguration(Model): def __init__(self, **kwargs): super(VirtualMachineConfiguration, self).__init__(**kwargs) self.image_reference = kwargs.get('image_reference', None) - self.os_disk = kwargs.get('os_disk', None) self.node_agent_sku_id = kwargs.get('node_agent_sku_id', None) self.windows_configuration = kwargs.get('windows_configuration', None) self.data_disks = kwargs.get('data_disks', None) diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py index c38af5c78a29..133e7b702694 ---
a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py @@ -21,9 +21,6 @@ class VirtualMachineConfiguration(Model): :param image_reference: Required. A reference to the Azure Virtual Machines Marketplace image or the custom Virtual Machine image to use. :type image_reference: ~azure.batch.models.ImageReference - :param os_disk: Settings for the operating system disk of the Virtual - Machine. - :type os_disk: ~azure.batch.models.OSDisk :param node_agent_sku_id: Required. The SKU of the Batch node agent to be provisioned on compute nodes in the pool. The Batch node agent is a program that runs on each node in the pool, and provides the @@ -36,10 +33,10 @@ class VirtualMachineConfiguration(Model): :type node_agent_sku_id: str :param windows_configuration: Windows operating system settings on the virtual machine. This property must not be specified if the imageReference - or osDisk property specifies a Linux OS image. + property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - compute nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. 
@@ -73,7 +70,6 @@ class VirtualMachineConfiguration(Model): _attribute_map = { 'image_reference': {'key': 'imageReference', 'type': 'ImageReference'}, - 'os_disk': {'key': 'osDisk', 'type': 'OSDisk'}, 'node_agent_sku_id': {'key': 'nodeAgentSKUId', 'type': 'str'}, 'windows_configuration': {'key': 'windowsConfiguration', 'type': 'WindowsConfiguration'}, 'data_disks': {'key': 'dataDisks', 'type': '[DataDisk]'}, @@ -81,10 +77,9 @@ class VirtualMachineConfiguration(Model): 'container_configuration': {'key': 'containerConfiguration', 'type': 'ContainerConfiguration'}, } - def __init__(self, *, image_reference, node_agent_sku_id: str, os_disk=None, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: + def __init__(self, *, image_reference, node_agent_sku_id: str, windows_configuration=None, data_disks=None, license_type: str=None, container_configuration=None, **kwargs) -> None: super(VirtualMachineConfiguration, self).__init__(**kwargs) self.image_reference = image_reference - self.os_disk = os_disk self.node_agent_sku_id = node_agent_sku_id self.windows_configuration = windows_configuration self.data_disks = data_disks diff --git a/azure-batch/azure/batch/models/windows_user_configuration.py b/azure-batch/azure/batch/models/windows_user_configuration.py new file mode 100644 index 000000000000..a895e80519b7 --- /dev/null +++ b/azure-batch/azure/batch/models/windows_user_configuration.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration pools is batch and for + CloudServiceConfiguration pools is interactive. Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, **kwargs): + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = kwargs.get('login_mode', None) diff --git a/azure-batch/azure/batch/models/windows_user_configuration_py3.py b/azure-batch/azure/batch/models/windows_user_configuration_py3.py new file mode 100644 index 000000000000..8917792831a3 --- /dev/null +++ b/azure-batch/azure/batch/models/windows_user_configuration_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class WindowsUserConfiguration(Model): + """Properties used to create a user account on a Windows node. + + :param login_mode: The login mode for the user. The default value for + VirtualMachineConfiguration pools is batch and for + CloudServiceConfiguration pools is interactive. 
Possible values include: + 'batch', 'interactive' + :type login_mode: str or ~azure.batch.models.LoginMode + """ + + _attribute_map = { + 'login_mode': {'key': 'loginMode', 'type': 'LoginMode'}, + } + + def __init__(self, *, login_mode=None, **kwargs) -> None: + super(WindowsUserConfiguration, self).__init__(**kwargs) + self.login_mode = login_mode diff --git a/azure-batch/azure/batch/operations/account_operations.py b/azure-batch/azure/batch/operations/account_operations.py index efd3a9e51e8c..a2f9a08baaad 100644 --- a/azure-batch/azure/batch/operations/account_operations.py +++ b/azure-batch/azure/batch/operations/account_operations.py @@ -22,7 +22,7 @@ class AccountOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -79,6 +79,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list_node_agent_skus.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -173,6 +177,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list_pool_node_counts.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, 
**path_format_arguments) # Construct parameters query_parameters = {} diff --git a/azure-batch/azure/batch/operations/application_operations.py b/azure-batch/azure/batch/operations/application_operations.py index 864d2ff12226..c2353c6993ff 100644 --- a/azure-batch/azure/batch/operations/application_operations.py +++ b/azure-batch/azure/batch/operations/application_operations.py @@ -22,7 +22,7 @@ class ApplicationOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -82,6 +82,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -174,6 +178,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'applicationId': self._serialize.url("application_id", application_id, 'str') } url = self._client.format_url(url, **path_format_arguments) diff --git a/azure-batch/azure/batch/operations/certificate_operations.py b/azure-batch/azure/batch/operations/certificate_operations.py index d4bdafb94a7b..0d069ece8fc3 100644 --- a/azure-batch/azure/batch/operations/certificate_operations.py +++ 
b/azure-batch/azure/batch/operations/certificate_operations.py @@ -22,7 +22,7 @@ class CertificateOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -71,6 +71,10 @@ def add( # Construct URL url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -163,6 +167,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -264,6 +272,7 @@ def cancel_deletion( # Construct URL url = self.cancel_deletion.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') } @@ -360,6 +369,7 @@ def delete( # Construct URL url = self.delete.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', 
skip_quote=True), 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') } @@ -447,6 +457,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'thumbprintAlgorithm': self._serialize.url("thumbprint_algorithm", thumbprint_algorithm, 'str'), 'thumbprint': self._serialize.url("thumbprint", thumbprint, 'str') } diff --git a/azure-batch/azure/batch/operations/compute_node_operations.py b/azure-batch/azure/batch/operations/compute_node_operations.py index e88836e7511e..008b68278948 100644 --- a/azure-batch/azure/batch/operations/compute_node_operations.py +++ b/azure-batch/azure/batch/operations/compute_node_operations.py @@ -22,7 +22,7 @@ class ComputeNodeOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". 
""" models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -80,6 +80,7 @@ def add_user( # Construct URL url = self.add_user.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -173,6 +174,7 @@ def delete_user( # Construct URL url = self.delete_user.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str'), 'userName': self._serialize.url("user_name", user_name, 'str') @@ -267,6 +269,7 @@ def update_user( # Construct URL url = self.update_user.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str'), 'userName': self._serialize.url("user_name", user_name, 'str') @@ -360,6 +363,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -465,6 +469,7 @@ def reboot( # Construct URL url = self.reboot.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': 
self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -568,6 +573,7 @@ def reimage( # Construct URL url = self.reimage.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -671,6 +677,7 @@ def disable_scheduling( # Construct URL url = self.disable_scheduling.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -765,6 +772,7 @@ def enable_scheduling( # Construct URL url = self.enable_scheduling.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -857,6 +865,7 @@ def get_remote_login_settings( # Construct URL url = self.get_remote_login_settings.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -960,6 +969,7 @@ def get_remote_desktop( # Construct URL url = self.get_remote_desktop.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -1065,6 +1075,7 @@ def upload_batch_service_logs( # Construct URL url = self.upload_batch_service_logs.metadata['url'] 
path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } @@ -1170,6 +1181,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) diff --git a/azure-batch/azure/batch/operations/file_operations.py b/azure-batch/azure/batch/operations/file_operations.py index cf997c3bf42c..eb681af147fc 100644 --- a/azure-batch/azure/batch/operations/file_operations.py +++ b/azure-batch/azure/batch/operations/file_operations.py @@ -22,7 +22,7 @@ class FileOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". 
""" models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -84,6 +84,7 @@ def delete_from_task( # Construct URL url = self.delete_from_task.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -184,6 +185,7 @@ def get_from_task( # Construct URL url = self.get_from_task.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -299,6 +301,7 @@ def get_properties_from_task( # Construct URL url = self.get_properties_from_task.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -402,6 +405,7 @@ def delete_from_compute_node( # Construct URL url = self.delete_from_compute_node.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -502,6 +506,7 @@ def get_from_compute_node( 
# Construct URL url = self.get_from_compute_node.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -616,6 +621,7 @@ def get_properties_from_compute_node( # Construct URL url = self.get_properties_from_compute_node.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str'), 'filePath': self._serialize.url("file_path", file_path, 'str') @@ -723,6 +729,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list_from_task.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } @@ -832,6 +839,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list_from_compute_node.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str'), 'nodeId': self._serialize.url("node_id", node_id, 'str') } diff --git a/azure-batch/azure/batch/operations/job_operations.py b/azure-batch/azure/batch/operations/job_operations.py index 7ef11c40edb4..6ff090808c23 100644 --- a/azure-batch/azure/batch/operations/job_operations.py +++ b/azure-batch/azure/batch/operations/job_operations.py @@ -22,7 +22,7 @@ class JobOperations(object): :param config: Configuration of service client. 
:param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -77,6 +77,10 @@ def get_all_lifetime_statistics( # Construct URL url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -183,6 +187,7 @@ def delete( # Construct URL url = self.delete.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -285,6 +290,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -402,6 +408,7 @@ def patch( # Construct URL url = self.patch.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -511,6 +518,7 @@ def update( # Construct URL url = self.update.metadata['url'] path_format_arguments = { + 
'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -627,6 +635,7 @@ def disable( # Construct URL url = self.disable.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -691,8 +700,9 @@ def enable( enabling state. After the this operation is completed, the job moves to the active state, and scheduling of new tasks under the job resumes. The Batch service does not allow a task to remain in the active state - for more than 7 days. Therefore, if you enable a job containing active - tasks which were added more than 7 days ago, those tasks will not run. + for more than 180 days. Therefore, if you enable a job containing + active tasks which were added more than 180 days ago, those tasks will + not run. :param job_id: The ID of the job to enable. 
:type job_id: str @@ -736,6 +746,7 @@ def enable( # Construct URL url = self.enable.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -848,6 +859,7 @@ def terminate( # Construct URL url = self.terminate.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -951,6 +963,10 @@ def add( # Construct URL url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -1043,6 +1059,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -1151,6 +1171,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list_from_job_schedule.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1267,6 +1288,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list_preparation_and_release_task_status.metadata['url'] path_format_arguments = { + 'batchUrl': 
self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1364,6 +1386,7 @@ def get_task_counts( # Construct URL url = self.get_task_counts.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) diff --git a/azure-batch/azure/batch/operations/job_schedule_operations.py b/azure-batch/azure/batch/operations/job_schedule_operations.py index e0909b5cf47b..a186cf136939 100644 --- a/azure-batch/azure/batch/operations/job_schedule_operations.py +++ b/azure-batch/azure/batch/operations/job_schedule_operations.py @@ -22,7 +22,7 @@ class JobScheduleOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". 
""" models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -85,6 +85,7 @@ def exists( # Construct URL url = self.exists.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -193,6 +194,7 @@ def delete( # Construct URL url = self.delete.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -297,6 +299,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -420,6 +423,7 @@ def patch( # Construct URL url = self.patch.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -534,6 +538,7 @@ def update( # Construct URL url = self.update.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url 
= self._client.format_url(url, **path_format_arguments) @@ -640,6 +645,7 @@ def disable( # Construct URL url = self.disable.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -740,6 +746,7 @@ def enable( # Construct URL url = self.enable.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -840,6 +847,7 @@ def terminate( # Construct URL url = self.terminate.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobScheduleId': self._serialize.url("job_schedule_id", job_schedule_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -927,6 +935,10 @@ def add( # Construct URL url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -1021,6 +1033,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} diff --git a/azure-batch/azure/batch/operations/pool_operations.py b/azure-batch/azure/batch/operations/pool_operations.py index 3792ae2113f6..9e7253d1e7a9 
100644 --- a/azure-batch/azure/batch/operations/pool_operations.py +++ b/azure-batch/azure/batch/operations/pool_operations.py @@ -22,7 +22,7 @@ class PoolOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -93,6 +93,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list_usage_metrics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -189,6 +193,10 @@ def get_all_lifetime_statistics( # Construct URL url = self.get_all_lifetime_statistics.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -276,6 +284,10 @@ def add( # Construct URL url = self.add.metadata['url'] + path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -368,6 +380,10 @@ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] + path_format_arguments = { 
+ 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} @@ -483,6 +499,7 @@ def delete( # Construct URL url = self.delete.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -578,6 +595,7 @@ def exists( # Construct URL url = self.exists.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -684,6 +702,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -802,6 +821,7 @@ def patch( # Construct URL url = self.patch.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -895,6 +915,7 @@ def disable_auto_scale( # Construct URL url = self.disable_auto_scale.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1016,6 +1037,7 @@ def enable_auto_scale( # Construct URL url = 
self.enable_auto_scale.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1123,6 +1145,7 @@ def evaluate_auto_scale( # Construct URL url = self.evaluate_auto_scale.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1238,6 +1261,7 @@ def resize( # Construct URL url = self.resize.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1351,6 +1375,7 @@ def stop_resize( # Construct URL url = self.stop_resize.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1448,6 +1473,7 @@ def update_properties( # Construct URL url = self.update_properties.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -1496,133 +1522,6 @@ def update_properties( return client_raw_response update_properties.metadata = {'url': '/pools/{poolId}/updateproperties'} - def upgrade_os( - self, pool_id, target_os_version, pool_upgrade_os_options=None, custom_headers=None, raw=False, **operation_config): - """Upgrades the 
operating system of the specified pool. - - During an upgrade, the Batch service upgrades each compute node in the - pool. When a compute node is chosen for upgrade, any tasks running on - that node are removed from the node and returned to the queue to be - rerun later (or on a different compute node). The node will be - unavailable until the upgrade is complete. This operation results in - temporarily reduced pool capacity as nodes are taken out of service to - be upgraded. Although the Batch service tries to avoid upgrading all - compute nodes at the same time, it does not guarantee to do this - (particularly on small pools); therefore, the pool may be temporarily - unavailable to run tasks. When this operation runs, the pool state - changes to upgrading. When all compute nodes have finished upgrading, - the pool state returns to active. While the upgrade is in progress, the - pool's currentOSVersion reflects the OS version that nodes are - upgrading from, and targetOSVersion reflects the OS version that nodes - are upgrading to. Once the upgrade is complete, currentOSVersion is - updated to reflect the OS version now running on all nodes. This - operation can only be invoked on pools created with the - cloudServiceConfiguration property. - - :param pool_id: The ID of the pool to upgrade. - :type pool_id: str - :param target_os_version: The Azure Guest OS version to be installed - on the virtual machines in the pool. - :type target_os_version: str - :param pool_upgrade_os_options: Additional parameters for the - operation - :type pool_upgrade_os_options: - ~azure.batch.models.PoolUpgradeOsOptions - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`BatchErrorException` - """ - timeout = None - if pool_upgrade_os_options is not None: - timeout = pool_upgrade_os_options.timeout - client_request_id = None - if pool_upgrade_os_options is not None: - client_request_id = pool_upgrade_os_options.client_request_id - return_client_request_id = None - if pool_upgrade_os_options is not None: - return_client_request_id = pool_upgrade_os_options.return_client_request_id - ocp_date = None - if pool_upgrade_os_options is not None: - ocp_date = pool_upgrade_os_options.ocp_date - if_match = None - if pool_upgrade_os_options is not None: - if_match = pool_upgrade_os_options.if_match - if_none_match = None - if pool_upgrade_os_options is not None: - if_none_match = pool_upgrade_os_options.if_none_match - if_modified_since = None - if pool_upgrade_os_options is not None: - if_modified_since = pool_upgrade_os_options.if_modified_since - if_unmodified_since = None - if pool_upgrade_os_options is not None: - if_unmodified_since = pool_upgrade_os_options.if_unmodified_since - pool_upgrade_os_parameter = models.PoolUpgradeOSParameter(target_os_version=target_os_version) - - # Construct URL - url = self.upgrade_os.metadata['url'] - path_format_arguments = { - 'poolId': self._serialize.url("pool_id", pool_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; odata=minimalmetadata; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['client-request-id'] = str(uuid.uuid1()) - if custom_headers: - 
header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - if client_request_id is not None: - header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str') - if return_client_request_id is not None: - header_parameters['return-client-request-id'] = self._serialize.header("return_client_request_id", return_client_request_id, 'bool') - if ocp_date is not None: - header_parameters['ocp-date'] = self._serialize.header("ocp_date", ocp_date, 'rfc-1123') - if if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') - if if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') - if if_modified_since is not None: - header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') - if if_unmodified_since is not None: - header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') - - # Construct body - body_content = self._serialize.body(pool_upgrade_os_parameter, 'PoolUpgradeOSParameter') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.BatchErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'client-request-id': 'str', - 'request-id': 'str', - 'ETag': 'str', - 'Last-Modified': 'rfc-1123', - 'DataServiceId': 'str', - }) - return client_raw_response - upgrade_os.metadata = {'url': '/pools/{poolId}/upgradeos'} - def remove_nodes( self, pool_id, 
node_remove_parameter, pool_remove_nodes_options=None, custom_headers=None, raw=False, **operation_config): """Removes compute nodes from the specified pool. @@ -1678,6 +1577,7 @@ def remove_nodes( # Construct URL url = self.remove_nodes.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'poolId': self._serialize.url("pool_id", pool_id, 'str') } url = self._client.format_url(url, **path_format_arguments) diff --git a/azure-batch/azure/batch/operations/task_operations.py b/azure-batch/azure/batch/operations/task_operations.py index 526187533a57..fb1ae6481dde 100644 --- a/azure-batch/azure/batch/operations/task_operations.py +++ b/azure-batch/azure/batch/operations/task_operations.py @@ -22,7 +22,7 @@ class TaskOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API Version. Constant value: "2018-08-01.7.0". + :ivar api_version: Client API Version. Constant value: "2018-12-01.8.0". """ models = models @@ -32,7 +32,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-08-01.7.0" + self.api_version = "2018-12-01.8.0" self.config = config @@ -40,8 +40,8 @@ def add( self, job_id, task, task_add_options=None, custom_headers=None, raw=False, **operation_config): """Adds a task to the specified job. - The maximum lifetime of a task from addition to completion is 7 days. - If a task has not completed within 7 days of being added it will be + The maximum lifetime of a task from addition to completion is 180 days. + If a task has not completed within 180 days of being added it will be terminated by the Batch service and left in whatever state it was in at that time. 
@@ -77,6 +77,7 @@ def add( # Construct URL url = self.add.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -179,6 +180,7 @@ def internal_paging(next_link=None, raw=False): # Construct URL url = self.list.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -253,8 +255,8 @@ def add_collection( tasks which failed to add, a client can retry the request. In a retry, it is most efficient to resubmit only tasks that failed to add, and to omit tasks that were successfully added on the first attempt. The - maximum lifetime of a task from addition to completion is 7 days. If a - task has not completed within 7 days of being added it will be + maximum lifetime of a task from addition to completion is 180 days. If + a task has not completed within 180 days of being added it will be terminated by the Batch service and left in whatever state it was in at that time. 
@@ -300,6 +302,7 @@ def add_collection( # Construct URL url = self.add_collection.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -409,6 +412,7 @@ def delete( # Construct URL url = self.delete.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } @@ -518,6 +522,7 @@ def get( # Construct URL url = self.get.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } @@ -639,6 +644,7 @@ def update( # Construct URL url = self.update.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } @@ -742,6 +748,7 @@ def list_subtasks( # Construct URL url = self.list_subtasks.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } @@ -851,6 +858,7 @@ def terminate( # Construct URL url = self.terminate.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 
'str') } @@ -964,6 +972,7 @@ def reactivate( # Construct URL url = self.reactivate.metadata['url'] path_format_arguments = { + 'batchUrl': self._serialize.url("self.config.batch_url", self.config.batch_url, 'str', skip_quote=True), 'jobId': self._serialize.url("job_id", job_id, 'str'), 'taskId': self._serialize.url("task_id", task_id, 'str') } diff --git a/azure-batch/azure/batch/version.py b/azure-batch/azure/batch/version.py index f24f038f478b..84a99a3c866b 100644 --- a/azure-batch/azure/batch/version.py +++ b/azure-batch/azure/batch/version.py @@ -9,5 +9,5 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "2018-08-01.7.0" +VERSION = "2018-12-01.8.0" diff --git a/azure-batch/build.json b/azure-batch/build.json index e4a1aef9625a..1c71a6016194 100644 --- a/azure-batch/build.json +++ b/azure-batch/build.json @@ -74,13 +74,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -162,13 +162,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -253,13 +253,13 @@ "pipe": 
"/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -344,13 +344,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -435,13 +435,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -526,13 +526,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": 
"/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -615,13 +615,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -701,13 +701,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -786,13 +786,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -871,13 +871,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": 
"2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -956,13 +956,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1041,13 +1041,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1126,13 +1126,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1211,13 +1211,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": 
"_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1281,13 +1281,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1364,13 +1364,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1480,13 +1480,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.6033.205377309443.personal-lock", + "name": "_root_.autorest.4492.570163087388.personal-lock", "options": { - "port": 24073, - "host": "2130710678", + "port": 16235, + "host": "2130720978", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.6033.205377309443.personal-lock:24073" + "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" }, "file": "/tmp/_root_.autorest.lock" }, From fbc00a569846f7fd24eae73b6ecbe078f40de3ad Mon 
Sep 17 00:00:00 2001 From: Azure SDK for Python bot Date: Mon, 17 Dec 2018 11:12:01 -0800 Subject: [PATCH 3/3] [AutoPR batch/data-plane] typo: batch/data-plane/Microsoft.Batch (#4074) * Generated from bd52bfb6794d7c79ecadba8b49686079fad40230 typo: batch/data-plane/Microsoft.Batch - comptue -> compute - pre-empted -> preempted * Generated from bd52bfb6794d7c79ecadba8b49686079fad40230 typo: batch/data-plane/Microsoft.Batch - comptue -> compute - pre-empted -> preempted --- .../models/batch_service_client_enums.py | 2 +- .../azure/batch/models/compute_node.py | 2 +- .../azure/batch/models/compute_node_py3.py | 2 +- .../models/virtual_machine_configuration.py | 2 +- .../virtual_machine_configuration_py3.py | 2 +- azure-batch/build.json | 136 +++++++++--------- 6 files changed, 73 insertions(+), 73 deletions(-) diff --git a/azure-batch/azure/batch/models/batch_service_client_enums.py b/azure-batch/azure/batch/models/batch_service_client_enums.py index 9c04014cfe91..83ee302f5ce5 100644 --- a/azure-batch/azure/batch/models/batch_service_client_enums.py +++ b/azure-batch/azure/batch/models/batch_service_client_enums.py @@ -241,7 +241,7 @@ class ComputeNodeState(str, Enum): unknown = "unknown" #: The Batch service has lost contact with the node, and does not know its true state. leaving_pool = "leavingpool" #: The node is leaving the pool, either because the user explicitly removed it or because the pool is resizing or autoscaling down. offline = "offline" #: The node is not currently running a task, and scheduling of new tasks to the node is disabled. - preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was pre-empted will be rescheduled when another node becomes available. + preempted = "preempted" #: The low-priority node has been preempted. Tasks which were running on the node when it was preempted will be rescheduled when another node becomes available. 
class SchedulingState(str, Enum): diff --git a/azure-batch/azure/batch/models/compute_node.py b/azure-batch/azure/batch/models/compute_node.py index 0321d6e73ead..1b5119638ec9 100644 --- a/azure-batch/azure/batch/models/compute_node.py +++ b/azure-batch/azure/batch/models/compute_node.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' diff --git a/azure-batch/azure/batch/models/compute_node_py3.py b/azure-batch/azure/batch/models/compute_node_py3.py index dff5d069add2..be5eebf25df6 100644 --- a/azure-batch/azure/batch/models/compute_node_py3.py +++ b/azure-batch/azure/batch/models/compute_node_py3.py @@ -24,7 +24,7 @@ class ComputeNode(Model): :type url: str :param state: The current state of the compute node. The low-priority node has been preempted. Tasks which were running on the node when it was - pre-empted will be rescheduled when another node becomes available. + preempted will be rescheduled when another node becomes available. 
Possible values include: 'idle', 'rebooting', 'reimaging', 'running', 'unusable', 'creating', 'starting', 'waitingForStartTask', 'startTaskFailed', 'unknown', 'leavingPool', 'offline', 'preempted' diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration.py b/azure-batch/azure/batch/models/virtual_machine_configuration.py index 12b5714b4e63..592d8009b968 100644 --- a/azure-batch/azure/batch/models/virtual_machine_configuration.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration.py @@ -36,7 +36,7 @@ class VirtualMachineConfiguration(Model): property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - comptue nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). Existing disks cannot be attached, each attached disk is empty. diff --git a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py index 133e7b702694..b9ca8b268dcb 100644 --- a/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py +++ b/azure-batch/azure/batch/models/virtual_machine_configuration_py3.py @@ -36,7 +36,7 @@ class VirtualMachineConfiguration(Model): property specifies a Linux OS image. :type windows_configuration: ~azure.batch.models.WindowsConfiguration :param data_disks: The configuration for data disks attached to the - comptue nodes in the pool. This property must be specified if the compute + compute nodes in the pool. This property must be specified if the compute nodes in the pool need to have empty data disks attached to them. This cannot be updated. Each node gets its own disk (the disk is not a file share). 
Existing disks cannot be attached, each attached disk is empty. diff --git a/azure-batch/build.json b/azure-batch/build.json index 1c71a6016194..35cabb2471e9 100644 --- a/azure-batch/build.json +++ b/azure-batch/build.json @@ -74,13 +74,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -162,13 +162,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -253,13 +253,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -344,13 +344,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": 
"_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -435,13 +435,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -526,13 +526,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -615,13 +615,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -701,13 +701,13 @@ "pipe": 
"/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -786,13 +786,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -871,13 +871,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -956,13 +956,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": 
"/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1041,13 +1041,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1126,13 +1126,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1211,13 +1211,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1281,13 +1281,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": 
"2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1364,13 +1364,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" }, @@ -1480,13 +1480,13 @@ "pipe": "/tmp/pipe__root_.autorest.busy-lock:37199" }, "personalLock": { - "name": "_root_.autorest.4492.570163087388.personal-lock", + "name": "_root_.autorest.2887.972548981721.personal-lock", "options": { - "port": 16235, - "host": "2130720978", + "port": 29714, + "host": "2130746708", "exclusive": true }, - "pipe": "/tmp/pipe__root_.autorest.4492.570163087388.personal-lock:16235" + "pipe": "/tmp/pipe__root_.autorest.2887.972548981721.personal-lock:29714" }, "file": "/tmp/_root_.autorest.lock" },