diff --git a/lib/services/batchaiManagement/lib/batchAIManagementClient.d.ts b/lib/services/batchaiManagement/lib/batchAIManagementClient.d.ts index 7daaa5120e..8e159bfca8 100644 --- a/lib/services/batchaiManagement/lib/batchAIManagementClient.d.ts +++ b/lib/services/batchaiManagement/lib/batchAIManagementClient.d.ts @@ -58,11 +58,11 @@ export default class BatchAIManagementClient extends AzureServiceClient { // Operation groups operations: operations.Operations; usages: operations.Usages; - clusters: operations.Clusters; - fileServers: operations.FileServers; workspaces: operations.Workspaces; experiments: operations.Experiments; jobs: operations.Jobs; + fileServers: operations.FileServers; + clusters: operations.Clusters; } export { BatchAIManagementClient, models as BatchAIManagementModels }; diff --git a/lib/services/batchaiManagement/lib/batchAIManagementClient.js b/lib/services/batchaiManagement/lib/batchAIManagementClient.js index aa61d6a540..77ba91e776 100644 --- a/lib/services/batchaiManagement/lib/batchAIManagementClient.js +++ b/lib/services/batchaiManagement/lib/batchAIManagementClient.js @@ -74,11 +74,11 @@ class BatchAIManagementClient extends ServiceClient { } this.operations = new operations.Operations(this); this.usages = new operations.Usages(this); - this.clusters = new operations.Clusters(this); - this.fileServers = new operations.FileServers(this); this.workspaces = new operations.Workspaces(this); this.experiments = new operations.Experiments(this); this.jobs = new operations.Jobs(this); + this.fileServers = new operations.FileServers(this); + this.clusters = new operations.Clusters(this); this.models = models; msRest.addSerializationMixin(this); } diff --git a/lib/services/batchaiManagement/lib/models/appInsightsReference.js b/lib/services/batchaiManagement/lib/models/appInsightsReference.js index 7ef62d1295..c060e77c76 100644 --- a/lib/services/batchaiManagement/lib/models/appInsightsReference.js +++ 
b/lib/services/batchaiManagement/lib/models/appInsightsReference.js @@ -13,27 +13,27 @@ const models = require('./index'); /** - * Specifies Azure Application Insights information for performance counters - * reporting. + * Azure Application Insights information for performance counters reporting. * */ class AppInsightsReference { /** * Create a AppInsightsReference. - * @member {object} component Specifies the Azure Application Insights - * component resource id. + * @member {object} component Component ID. Azure Application Insights + * component resource ID. * @member {string} [component.id] The ID of the resource - * @member {string} [instrumentationKey] Value of the Azure Application - * Insights instrumentation key. - * @member {object} [instrumentationKeySecretReference] Specifies a KeyVault - * Secret containing Azure Application Insights instrumentation key. - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * @member {string} [instrumentationKey] Instrumentation Key. Value of the + * Azure Application Insights instrumentation key. + * @member {object} [instrumentationKeySecretReference] Instrumentation key + * KeyVault Secret reference. KeyVault Store and Secret which contains Azure + * Application Insights instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. - * @member {object} [instrumentationKeySecretReference.sourceVault] + * @member {object} [instrumentationKeySecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} [instrumentationKeySecretReference.sourceVault.id] The ID * of the resource - * @member {string} [instrumentationKeySecretReference.secretUrl] + * @member {string} [instrumentationKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/autoScaleSettings.js b/lib/services/batchaiManagement/lib/models/autoScaleSettings.js index 9154865764..56cd3b3613 100644 --- a/lib/services/batchaiManagement/lib/models/autoScaleSettings.js +++ b/lib/services/batchaiManagement/lib/models/autoScaleSettings.js @@ -11,21 +11,23 @@ 'use strict'; /** - * The system automatically scales the cluster up and down (within - * minimumNodeCount and maximumNodeCount) based on the pending and running jobs - * on the cluster. + * Auto-scale settings for the cluster. The system automatically scales the + * cluster up and down (within minimumNodeCount and maximumNodeCount) based on + * the number of queued and running jobs assigned to the cluster. * */ class AutoScaleSettings { /** * Create a AutoScaleSettings. - * @member {number} minimumNodeCount Specifies the minimum number of compute - * nodes the cluster can have. - * @member {number} maximumNodeCount Specifies the maximum number of compute - * nodes the cluster can have. - * @member {number} [initialNodeCount] Specifies the number of compute nodes - * to allocate on cluster creation. Note that this value is used only during - * cluster creation. Default value: 0 . + * @member {number} minimumNodeCount Minimum node count. The minimum number + * of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} maximumNodeCount Maximum node count. The maximum number + * of compute nodes the cluster can have. + * @member {number} [initialNodeCount] Initial node count. The number of + * compute nodes to allocate on cluster creation. Note that this value is + * used only during cluster creation. Default: 0. Default value: 0 . 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/azureBlobFileSystemReference.js b/lib/services/batchaiManagement/lib/models/azureBlobFileSystemReference.js index 5201e7a2dd..351282f559 100644 --- a/lib/services/batchaiManagement/lib/models/azureBlobFileSystemReference.js +++ b/lib/services/batchaiManagement/lib/models/azureBlobFileSystemReference.js @@ -13,35 +13,36 @@ const models = require('./index'); /** - * Provides required information, for the service to be able to mount Azure - * Blob Storage container on the cluster nodes. + * Azure Blob Storage Container mounting configuration. * */ class AzureBlobFileSystemReference { /** * Create a AzureBlobFileSystemReference. - * @member {string} accountName Name of the Azure Blob Storage account. - * @member {string} containerName Name of the Azure Blob Storage container to - * mount on the cluster. - * @member {object} credentials Information of the Azure Blob Storage account - * credentials. - * @member {string} [credentials.accountKey] One of accountKey or + * @member {string} accountName Account name. Name of the Azure storage + * account. + * @member {string} containerName Container name. Name of the Azure Blob + * Storage container to mount on the cluster. + * @member {object} credentials Credentials. Information about the Azure + * storage credentials. + * @member {string} [credentials.accountKey] Storage account key. One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [credentials.accountKeySecretReference] Information about + * KeyVault secret storing the storage account key. One of accountKey or * accountKeySecretReference must be specified. - * @member {object} [credentials.accountKeySecretReference] Users can store - * their secrets in Azure KeyVault and pass it to the Batch AI Service to - * integrate with KeyVault. One of accountKey or accountKeySecretReference - * must be specified. 
- * @member {object} [credentials.accountKeySecretReference.sourceVault] + * @member {object} [credentials.accountKeySecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} [credentials.accountKeySecretReference.sourceVault.id] * The ID of the resource - * @member {string} [credentials.accountKeySecretReference.secretUrl] - * @member {string} relativeMountPath Specifies the relative path on the - * compute node where the Azure Blob file system will be mounted. Note that - * all cluster level blob file systems will be mounted under - * $AZ_BATCHAI_MOUNT_ROOT location and all job level blob file systems will - * be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT. - * @member {string} [mountOptions] Specifies the various mount options that - * can be used to configure Blob file system. + * @member {string} [credentials.accountKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. + * @member {string} relativeMountPath Relative mount path. The relative path + * on the compute node where the Azure Blob Storage container will be mounted. Note + * that all cluster level containers will be mounted under + * $AZ_BATCHAI_MOUNT_ROOT location and all job level containers will be + * mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT. + * @member {string} [mountOptions] Mount options. Mount options for mounting + * blobfuse file system. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/azureFileShareReference.js b/lib/services/batchaiManagement/lib/models/azureFileShareReference.js index a0a14d7b47..1cdae091e4 100644 --- a/lib/services/batchaiManagement/lib/models/azureFileShareReference.js +++ b/lib/services/batchaiManagement/lib/models/azureFileShareReference.js @@ -13,34 +13,38 @@ const models = require('./index'); /** - * Details of the Azure File Share to mount on the cluster. + * Azure File Share mounting configuration. * */ class AzureFileShareReference { /** * Create a AzureFileShareReference. 
- * @member {string} accountName Name of the storage account. - * @member {string} azureFileUrl URL to access the Azure File. - * @member {object} credentials Information of the Azure File credentials. - * @member {string} [credentials.accountKey] One of accountKey or + * @member {string} accountName Account name. Name of the Azure storage + * account. + * @member {string} azureFileUrl Azure File URL. URL to access the Azure + * File. + * @member {object} credentials Credentials. Information about the Azure + * storage credentials. + * @member {string} [credentials.accountKey] Storage account key. One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [credentials.accountKeySecretReference] Information about + * KeyVault secret storing the storage account key. One of accountKey or * accountKeySecretReference must be specified. - * @member {object} [credentials.accountKeySecretReference] Users can store - * their secrets in Azure KeyVault and pass it to the Batch AI Service to - * integrate with KeyVault. One of accountKey or accountKeySecretReference - * must be specified. - * @member {object} [credentials.accountKeySecretReference.sourceVault] + * @member {object} [credentials.accountKeySecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} [credentials.accountKeySecretReference.sourceVault.id] * The ID of the resource - * @member {string} [credentials.accountKeySecretReference.secretUrl] - * @member {string} relativeMountPath Specifies the relative path on the - * compute node where the Azure file share will be mounted. Note that all - * cluster level file shares will be mounted under $AZ_BATCHAI_MOUNT_ROOT + * @member {string} [credentials.accountKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. + * @member {string} relativeMountPath Relative mount path. The relative path + * on the compute node where the Azure File share will be mounted. 
Note that + * all cluster level file shares will be mounted under $AZ_BATCHAI_MOUNT_ROOT * location and all job level file shares will be mounted under * $AZ_BATCHAI_JOB_MOUNT_ROOT. - * @member {string} [fileMode] Specifies the file mode. Default value is - * 0777. Valid only if OS is linux. Default value: '0777' . - * @member {string} [directoryMode] Specifies the directory Mode. Default - * value is 0777. Valid only if OS is linux. Default value: '0777' . + * @member {string} [fileMode] File mode. File mode for files on the mounted + * file share. Default value: 0777. Default value: '0777' . + * @member {string} [directoryMode] Directory mode. File mode for directories + * on the mounted file share. Default value: 0777. Default value: '0777' . */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/azureStorageCredentialsInfo.js b/lib/services/batchaiManagement/lib/models/azureStorageCredentialsInfo.js index dc20281e83..cb0d61e741 100644 --- a/lib/services/batchaiManagement/lib/models/azureStorageCredentialsInfo.js +++ b/lib/services/batchaiManagement/lib/models/azureStorageCredentialsInfo.js @@ -13,23 +13,23 @@ const models = require('./index'); /** - * Credentials to access Azure File Share. + * Azure storage account credentials. * */ class AzureStorageCredentialsInfo { /** * Create a AzureStorageCredentialsInfo. - * @member {string} [accountKey] Storage account key. One of accountKey or - * accountKeySecretReference must be specified. - * @member {object} [accountKeySecretReference] Specifies the location of the - * storage account key, which is a Key Vault Secret. Users can store their - * secrets in Azure KeyVault and pass it to the Batch AI Service to integrate - * with KeyVault. One of accountKey or accountKeySecretReference must be - * specified. - * @member {object} [accountKeySecretReference.sourceVault] + * @member {string} [accountKey] Account key. Storage account key. 
One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [accountKeySecretReference] Account key secret reference. + * Information about KeyVault secret storing the storage account key. One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [accountKeySecretReference.sourceVault] Fully qualified + * resource indentifier of the Key Vault. * @member {string} [accountKeySecretReference.sourceVault.id] The ID of the * resource - * @member {string} [accountKeySecretReference.secretUrl] + * @member {string} [accountKeySecretReference.secretUrl] The URL referencing + * a secret in the Key Vault. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/batchAIError.js b/lib/services/batchaiManagement/lib/models/batchAIError.js index bed3b20617..56764f537f 100644 --- a/lib/services/batchaiManagement/lib/models/batchAIError.js +++ b/lib/services/batchaiManagement/lib/models/batchAIError.js @@ -19,7 +19,7 @@ const models = require('./index'); class BatchAIError { /** * Create a BatchAIError. - * @member {string} [code] An identifier for the error. Codes are invariant + * @member {string} [code] An identifier of the error. Codes are invariant * and are intended to be consumed programmatically. * @member {string} [message] A message describing the error, intended to be * suitable for display in a user interface. diff --git a/lib/services/batchaiManagement/lib/models/cNTKsettings.js b/lib/services/batchaiManagement/lib/models/cNTKsettings.js index 441dcbe55e..6fc8b67483 100644 --- a/lib/services/batchaiManagement/lib/models/cNTKsettings.js +++ b/lib/services/batchaiManagement/lib/models/cNTKsettings.js @@ -11,27 +11,29 @@ 'use strict'; /** - * Specifies the settings for CNTK (aka Microsoft Cognitive Toolkit) job. + * CNTK (aka Microsoft Cognitive Toolkit) job settings. * */ class CNTKsettings { /** * Create a CNTKsettings. 
- * @member {string} [languageType] Specifies the language type to use for + * @member {string} [languageType] Language type. The language to use for * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are * 'BrainScript' or 'Python'. - * @member {string} [configFilePath] Specifies the path of the config file. - * This property can be specified only if the languageType is 'BrainScript'. - * @member {string} [pythonScriptFilePath] The path and file name of the - * python script to execute the job. This property can be specified only if - * the languageType is 'Python'. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * This property can be specified only if the languageType is 'Python'. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script or CNTK.exe. - * @member {number} [processCount] Number of processes parameter that is - * passed to MPI runtime. The default value for this property is equal to - * nodeCount property + * @member {string} [configFilePath] Config file path. Specifies the path of + * the BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. + * @member {string} [pythonScriptFilePath] Python script file path. Python + * script to execute. This property can be specified only if the languageType + * is 'Python'. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. This property can be specified only if the + * languageType is 'Python'. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script or cntk executable. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. 
The default value for this property is equal + * to nodeCount property */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/caffe2Settings.js b/lib/services/batchaiManagement/lib/models/caffe2Settings.js index b7dd02da1b..b46565ed78 100644 --- a/lib/services/batchaiManagement/lib/models/caffe2Settings.js +++ b/lib/services/batchaiManagement/lib/models/caffe2Settings.js @@ -11,17 +11,18 @@ 'use strict'; /** - * Specifies the settings for Caffe2 job. + * Caffe2 job settings. * */ class Caffe2Settings { /** * Create a Caffe2Settings. - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/caffeSettings.js b/lib/services/batchaiManagement/lib/models/caffeSettings.js index 79d7e6b89f..9691dd9e9d 100644 --- a/lib/services/batchaiManagement/lib/models/caffeSettings.js +++ b/lib/services/batchaiManagement/lib/models/caffeSettings.js @@ -11,25 +11,26 @@ 'use strict'; /** - * Specifies the settings for Caffe job. + * Caffe job settings. * */ class CaffeSettings { /** * Create a CaffeSettings. - * @member {string} [configFilePath] Specifies the path of the config file. - * This property cannot be specified if pythonScriptFilePath is specified. - * @member {string} [pythonScriptFilePath] The path and file name of the - * python script to execute the job. 
This property cannot be specified if - * configFilePath is specified. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * This property can be specified only if the pythonScriptFilePath is + * @member {string} [configFilePath] Config file path. Path of the config + * file for the job. This property cannot be specified if + * pythonScriptFilePath is specified. + * @member {string} [pythonScriptFilePath] Python script file path. Python + * script to execute. This property cannot be specified if configFilePath is * specified. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the Caffe job. - * @member {number} [processCount] Number of processes parameter that is - * passed to MPI runtime. The default value for this property is equal to - * nodeCount property + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the Caffe job. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/chainerSettings.js b/lib/services/batchaiManagement/lib/models/chainerSettings.js index 5ca7406e19..18113b2c73 100644 --- a/lib/services/batchaiManagement/lib/models/chainerSettings.js +++ b/lib/services/batchaiManagement/lib/models/chainerSettings.js @@ -11,20 +11,21 @@ 'use strict'; /** - * Specifies the settings for Chainer job. + * Chainer job settings. * */ class ChainerSettings { /** * Create a ChainerSettings. - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. 
- * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. - * @member {number} [processCount] Number of processes parameter that is - * passed to MPI runtime. The default value for this property is equal to - * nodeCount property + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/cluster.js b/lib/services/batchaiManagement/lib/models/cluster.js index 96ebc78d7c..8ded6ec991 100644 --- a/lib/services/batchaiManagement/lib/models/cluster.js +++ b/lib/services/batchaiManagement/lib/models/cluster.js @@ -13,141 +13,178 @@ const models = require('./index'); /** - * Contains information about a Cluster. + * Information about a Cluster. * - * @extends models['Resource'] + * @extends models['ProxyResource'] */ -class Cluster extends models['Resource'] { +class Cluster extends models['ProxyResource'] { /** * Create a Cluster. - * @member {string} [vmSize] The size of the virtual machines in the cluster. - * All virtual machines in a cluster are the same size. For information about - * available VM sizes for clusters using images from the Virtual Machines - * Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual - * Machines (Windows). Batch AI service supports all Azure VM sizes except - * STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and - * STANDARD_DSV2 series). 
- * @member {string} [vmPriority] dedicated or lowpriority. The default value - * is dedicated. The node can get preempted while the task is running if - * lowpriority is choosen. This is best suited if the workload is - * checkpointing and can be restarted. Possible values include: 'dedicated', - * 'lowpriority'. Default value: 'dedicated' . - * @member {object} [scaleSettings] Desired scale for the Cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * @member {string} [vmSize] VM size. The size of the virtual machines in the + * cluster. All nodes in a cluster have the same VM size. + * @member {string} [vmPriority] VM priority. VM priority of cluster nodes. + * Possible values include: 'dedicated', 'lowpriority'. Default value: + * 'dedicated' . + * @member {object} [scaleSettings] Scale settings. Scale settings of the + * cluster. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. + * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. + * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action + * to be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] - * @member {object} [virtualMachineConfiguration] Settings for OS image and - * mounted data volumes. 
- * @member {object} [virtualMachineConfiguration.imageReference] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. + * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. + * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is + * used only during cluster creation. Default: 0. + * @member {object} [virtualMachineConfiguration] VM configuration. Virtual + * machine configuration (OS image) of the compute nodes. All nodes in a + * cluster have the same OS image configuration. + * @member {object} [virtualMachineConfiguration.imageReference] OS image + * reference for cluster nodes. * @member {string} [virtualMachineConfiguration.imageReference.publisher] - * @member {string} [virtualMachineConfiguration.imageReference.offer] - * @member {string} [virtualMachineConfiguration.imageReference.sku] + * Publisher of the image. + * @member {string} [virtualMachineConfiguration.imageReference.offer] Offer + * of the image. + * @member {string} [virtualMachineConfiguration.imageReference.sku] SKU of + * the image. * @member {string} [virtualMachineConfiguration.imageReference.version] + * Version of the image. * @member {string} - * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The - * virtual machine image must be in the same region and subscription as the - * cluster. 
For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see + * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The ARM + * resource identifier of the virtual machine image for the compute nodes. + * This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as + * the cluster. For information about the firewall settings for the Batch + * node agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. - * @member {object} [nodeSetup] Setup to be done on all compute nodes in the - * Cluster. - * @member {object} [nodeSetup.setupTask] - * @member {string} [nodeSetup.setupTask.commandLine] Command line to be + * @member {object} [nodeSetup] Node setup. Setup (mount file systems, + * performance counters settings and custom setup task) to be performed on + * each compute node in the cluster. + * @member {object} [nodeSetup.setupTask] Setup task to run on cluster nodes + * when nodes got created or rebooted. The setup task code needs to be + * idempotent. Generally the setup task is used to download static data that + * is required for all jobs that run on the cluster VMs and/or to + * download/install software. + * @member {string} [nodeSetup.setupTask.commandLine] The command line to be * executed on each cluster's node after it being allocated or rebooted. The * command is executed in a bash subshell as a root. - * @member {array} [nodeSetup.setupTask.environmentVariables] - * @member {array} [nodeSetup.setupTask.secrets] Server will never report - * values of these variables back. 
+ * @member {array} [nodeSetup.setupTask.environmentVariables] A collection of + * user defined environment variables to be set for setup task. + * @member {array} [nodeSetup.setupTask.secrets] A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. * @member {string} [nodeSetup.setupTask.stdOutErrPathPrefix] The prefix of a - * path where the Batch AI service will upload the stdout and stderr of the - * setup task. - * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] Batch AI + * path where the Batch AI service will upload the stdout, stderr and + * execution log of the setup task. + * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] A path segment + * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout, + * stderr and execution log of the setup task will be uploaded. Batch AI * creates the setup task output directories under an unique path to avoid - * conflicts between different clusters. You can concatinate - * stdOutErrPathPrefix and stdOutErrPathSuffix to get the full path to the - * output directory. - * @member {object} [nodeSetup.mountVolumes] Specified mount volumes will be - * available to all jobs executing on the cluster. The volumes will be + * conflicts between different clusters. The full path can be obtained by + * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix. + * @member {object} [nodeSetup.mountVolumes] Mount volumes to be available to + * setup task and all jobs executing on the cluster. The volumes will be * mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT environment * variable. - * @member {array} [nodeSetup.mountVolumes.azureFileShares] References to + * @member {array} [nodeSetup.mountVolumes.azureFileShares] A collection of * Azure File Shares that are to be mounted to the cluster nodes. 
- * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] References - * to Azure Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.fileServers] - * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] - * @member {object} [nodeSetup.performanceCountersSettings] + * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] A collection + * of Azure Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.fileServers] A collection of Batch + * AI File Servers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] A collection + * of unmanaged file systems that are to be mounted to the cluster nodes. + * @member {object} [nodeSetup.performanceCountersSettings] Settings for + * performance counters collecting and uploading. * @member {object} - * [nodeSetup.performanceCountersSettings.appInsightsReference] If provided, - * Batch AI will upload node performance counters to the corresponding Azure - * Application Insights account. + * [nodeSetup.performanceCountersSettings.appInsightsReference] Azure + * Application Insights information for performance counters reporting. If + * provided, Batch AI will upload node performance counters to the + * corresponding Azure Application Insights account. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.component] + * Azure Application Insights component resource ID. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.component.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Value of the Azure Application Insights instrumentation key. 
 * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl] - * @member {object} [userAccountSettings] Settings for user account of - * compute nodes. - * @member {string} [userAccountSettings.adminUserName] - * @member {string} [userAccountSettings.adminUserSshPublicKey] - * @member {string} [userAccountSettings.adminUserPassword] - * @member {object} [subnet] Specifies the identifier of the subnet. + * The URL referencing a secret in the Key Vault. + * @member {object} [userAccountSettings] User account settings. + * Administrator user account settings which can be used to SSH to compute + * nodes. + * @member {string} [userAccountSettings.adminUserName] Name of the + * administrator user account which can be used to SSH to nodes. + * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public + * key of the administrator user account. + * @member {string} [userAccountSettings.adminUserPassword] Password of the + * administrator user account. + * @member {object} [subnet] Subnet. Virtual network subnet resource ID the + * cluster nodes belong to.
* @member {string} [subnet.id] The ID of the resource - * @member {date} [creationTime] The creation time of the cluster. - * @member {string} [provisioningState] Specifies the provisioning state of - * the cluster. Possible value are: creating - Specifies that the cluster is - * being created. succeeded - Specifies that the cluster has been created - * successfully. failed - Specifies that the cluster creation has failed. - * deleting - Specifies that the cluster is being deleted. Possible values - * include: 'creating', 'succeeded', 'failed', 'deleting' - * @member {date} [provisioningStateTransitionTime] The provisioning state - * transition time of the cluster. - * @member {string} [allocationState] Indicates whether the cluster is - * resizing. Possible values are: steady and resizing. steady state indicates - * that the cluster is not resizing. There are no changes to the number of - * compute nodes in the cluster in progress. A cluster enters this state when - * it is created and when no operations are being performed on the cluster to - * change the number of compute nodes. resizing state indicates that the - * cluster is resizing; that is, compute nodes are being added to or removed - * from the cluster. Possible values include: 'steady', 'resizing' - * @member {date} [allocationStateTransitionTime] The time at which the - * cluster entered its current allocation state. - * @member {array} [errors] Contains details of various errors on the cluster - * including resize and node setup task. This element contains all the errors - * encountered by various compute nodes during node setup. - * @member {number} [currentNodeCount] The number of compute nodes currently - * assigned to the cluster. - * @member {object} [nodeStateCounts] Counts of various node states on the - * cluster. 
- * @member {number} [nodeStateCounts.idleNodeCount] - * @member {number} [nodeStateCounts.runningNodeCount] - * @member {number} [nodeStateCounts.preparingNodeCount] - * @member {number} [nodeStateCounts.unusableNodeCount] - * @member {number} [nodeStateCounts.leavingNodeCount] + * @member {date} [creationTime] Creation time. The time when the cluster was + * created. + * @member {string} [provisioningState] Provisioning state. Provisioning + * state of the cluster. Possible value are: creating - Specifies that the + * cluster is being created. succeeded - Specifies that the cluster has been + * created successfully. failed - Specifies that the cluster creation has + * failed. deleting - Specifies that the cluster is being deleted. Possible + * values include: 'creating', 'succeeded', 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning State + * Transition time. Time when the provisioning state was changed. + * @member {string} [allocationState] Allocation state. Allocation state of + * the cluster. Possible values are: steady - Indicates that the cluster is + * not resizing. There are no changes to the number of compute nodes in the + * cluster in progress. A cluster enters this state when it is created and + * when no operations are being performed on the cluster to change the number + * of compute nodes. resizing - Indicates that the cluster is resizing; that + * is, compute nodes are being added to or removed from the cluster. Possible + * values include: 'steady', 'resizing' + * @member {date} [allocationStateTransitionTime] Allocation state transition + * time. The time at which the cluster entered its current allocation state. + * @member {array} [errors] Errors. Collection of errors encountered by + * various compute nodes during node setup. + * @member {number} [currentNodeCount] Current node count. The number of + * compute nodes currently assigned to the cluster. 
+ * @member {object} [nodeStateCounts] Node state counts. Counts of various + * node states on the cluster. + * @member {number} [nodeStateCounts.idleNodeCount] Number of compute nodes + * in idle state. + * @member {number} [nodeStateCounts.runningNodeCount] Number of compute + * nodes which are running jobs. + * @member {number} [nodeStateCounts.preparingNodeCount] Number of compute + * nodes which are being prepared. + * @member {number} [nodeStateCounts.unusableNodeCount] Number of compute + * nodes which are in unusable state. + * @member {number} [nodeStateCounts.leavingNodeCount] Number of compute + * nodes which are leaving the cluster. */ constructor() { super(); @@ -191,29 +228,6 @@ class Cluster extends models['Resource'] { name: 'String' } }, - location: { - required: false, - readOnly: true, - serializedName: 'location', - type: { - name: 'String' - } - }, - tags: { - required: false, - readOnly: true, - serializedName: 'tags', - type: { - name: 'Dictionary', - value: { - required: false, - serializedName: 'StringElementType', - type: { - name: 'String' - } - } - } - }, vmSize: { required: false, serializedName: 'properties.vmSize', diff --git a/lib/services/batchaiManagement/lib/models/clusterCreateParameters.js b/lib/services/batchaiManagement/lib/models/clusterCreateParameters.js index 7953a5d3ad..b3ffda2f65 100644 --- a/lib/services/batchaiManagement/lib/models/clusterCreateParameters.js +++ b/lib/services/batchaiManagement/lib/models/clusterCreateParameters.js @@ -13,109 +13,144 @@ const models = require('./index'); /** - * Parameters supplied to the Create operation. + * Cluster creation operation. * */ class ClusterCreateParameters { /** * Create a ClusterCreateParameters. - * @member {string} location The region in which to create the cluster. - * @member {object} [tags] The user specified tags associated with the - * Cluster. - * @member {string} vmSize The size of the virtual machines in the cluster. 
- * All virtual machines in a cluster are the same size. For information about - * available VM sizes for clusters using images from the Virtual Machines - * Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual - * Machines (Windows). Batch AI service supports all Azure VM sizes except - * STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and - * STANDARD_DSV2 series). - * @member {string} [vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority'. Default - * value: 'dedicated' . - * @member {object} [scaleSettings] Desired scale for the cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * @member {string} vmSize VM size. The size of the virtual machines in the + * cluster. All nodes in a cluster have the same VM size. For information + * about available VM sizes for clusters using images from the Virtual + * Machines Marketplace see Sizes for Virtual Machines (Linux). Batch AI + * service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + * @member {string} [vmPriority] VM priority. VM priority. Allowed values + * are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority'. Default value: 'dedicated' . + * @member {object} [scaleSettings] Scale settings. Scale settings for the + * cluster. Batch AI service supports manual and auto scale clusters. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. 
+ * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. + * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action + * to be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] - * @member {object} [virtualMachineConfiguration] Settings for OS image and - * mounted data volumes. - * @member {object} [virtualMachineConfiguration.imageReference] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. + * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. + * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is + * used only during cluster creation. Default: 0. + * @member {object} [virtualMachineConfiguration] VM configuration. OS image + * configuration for cluster nodes. All nodes in a cluster have the same OS + * image. + * @member {object} [virtualMachineConfiguration.imageReference] OS image + * reference for cluster nodes. 
* @member {string} [virtualMachineConfiguration.imageReference.publisher] - * @member {string} [virtualMachineConfiguration.imageReference.offer] - * @member {string} [virtualMachineConfiguration.imageReference.sku] + * Publisher of the image. + * @member {string} [virtualMachineConfiguration.imageReference.offer] Offer + * of the image. + * @member {string} [virtualMachineConfiguration.imageReference.sku] SKU of + * the image. * @member {string} [virtualMachineConfiguration.imageReference.version] + * Version of the image. * @member {string} - * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The - * virtual machine image must be in the same region and subscription as the - * cluster. For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see + * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The ARM + * resource identifier of the virtual machine image for the compute nodes. + * This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as + * the cluster. For information about the firewall settings for the Batch + * node agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. - * @member {object} [nodeSetup] Setup to be done on all compute nodes in the - * cluster. - * @member {object} [nodeSetup.setupTask] - * @member {string} [nodeSetup.setupTask.commandLine] Command line to be + * @member {object} [nodeSetup] Node setup. Setup to be performed on each + * compute node in the cluster. 
+ * @member {object} [nodeSetup.setupTask] Setup task to run on cluster nodes + * when nodes got created or rebooted. The setup task code needs to be + * idempotent. Generally the setup task is used to download static data that + * is required for all jobs that run on the cluster VMs and/or to + * download/install software. + * @member {string} [nodeSetup.setupTask.commandLine] The command line to be * executed on each cluster's node after it being allocated or rebooted. The * command is executed in a bash subshell as a root. - * @member {array} [nodeSetup.setupTask.environmentVariables] - * @member {array} [nodeSetup.setupTask.secrets] Server will never report - * values of these variables back. + * @member {array} [nodeSetup.setupTask.environmentVariables] A collection of + * user defined environment variables to be set for setup task. + * @member {array} [nodeSetup.setupTask.secrets] A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. * @member {string} [nodeSetup.setupTask.stdOutErrPathPrefix] The prefix of a - * path where the Batch AI service will upload the stdout and stderr of the - * setup task. - * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] Batch AI + * path where the Batch AI service will upload the stdout, stderr and + * execution log of the setup task. + * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] A path segment + * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout, + * stderr and execution log of the setup task will be uploaded. Batch AI * creates the setup task output directories under an unique path to avoid - * conflicts between different clusters. You can concatinate - * stdOutErrPathPrefix and stdOutErrPathSuffix to get the full path to the - * output directory. 
- * @member {object} [nodeSetup.mountVolumes] Specified mount volumes will be - * available to all jobs executing on the cluster. The volumes will be + * conflicts between different clusters. The full path can be obtained by + * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix. + * @member {object} [nodeSetup.mountVolumes] Mount volumes to be available to + * setup task and all jobs executing on the cluster. The volumes will be * mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT environment * variable. - * @member {array} [nodeSetup.mountVolumes.azureFileShares] References to + * @member {array} [nodeSetup.mountVolumes.azureFileShares] A collection of * Azure File Shares that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] References - * to Azure Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.fileServers] - * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] - * @member {object} [nodeSetup.performanceCountersSettings] + * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] A collection + * of Azure Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.fileServers] A collection of Batch + * AI File Servers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] A collection + * of unmanaged file systems that are to be mounted to the cluster nodes. + * @member {object} [nodeSetup.performanceCountersSettings] Settings for + * performance counters collecting and uploading. * @member {object} - * [nodeSetup.performanceCountersSettings.appInsightsReference] If provided, - * Batch AI will upload node performance counters to the corresponding Azure - * Application Insights account. 
+ * [nodeSetup.performanceCountersSettings.appInsightsReference] Azure + * Application Insights information for performance counters reporting. If + * provided, Batch AI will upload node performance counters to the + * corresponding Azure Application Insights account. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.component] + * Azure Application Insights component resource ID. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.component.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Value of the Azure Application Insights instrumentation key. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl] - * @member {object} userAccountSettings Settings for user account that will - * be created on all compute nodes of the cluster. - * @member {string} [userAccountSettings.adminUserName] - * @member {string} [userAccountSettings.adminUserSshPublicKey] - * @member {string} [userAccountSettings.adminUserPassword] - * @member {object} [subnet] Specifies the identifier of the subnet. .
+ * The URL referencing a secret in the Key Vault. + * @member {object} userAccountSettings User account settings. Settings for + * an administrator user account that will be created on each compute node in + * the cluster. + * @member {string} [userAccountSettings.adminUserName] Name of the + * administrator user account which can be used to SSH to nodes. + * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public + * key of the administrator user account. + * @member {string} [userAccountSettings.adminUserPassword] Password of the + * administrator user account. + * @member {object} [subnet] Subnet. Existing virtual network subnet to put + * the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * @member {string} [subnet.id] The ID of the resource */ constructor() { @@ -135,27 +170,6 @@ class ClusterCreateParameters { name: 'Composite', className: 'ClusterCreateParameters', modelProperties: { - location: { - required: true, - serializedName: 'location', - type: { - name: 'String' - } - }, - tags: { - required: false, - serializedName: 'tags', - type: { - name: 'Dictionary', - value: { - required: false, - serializedName: 'StringElementType', - type: { - name: 'String' - } - } - } - }, vmSize: { required: true, serializedName: 'properties.vmSize', diff --git a/lib/services/batchaiManagement/lib/models/clusterUpdateParameters.js b/lib/services/batchaiManagement/lib/models/clusterUpdateParameters.js index 6e191c7608..a1c1a40df8 100644 --- a/lib/services/batchaiManagement/lib/models/clusterUpdateParameters.js +++ b/lib/services/batchaiManagement/lib/models/clusterUpdateParameters.js @@ -13,26 +13,33 @@ const models = require('./index'); /** - * Parameters supplied to the Update operation. + * Cluster update parameters. * */ class ClusterUpdateParameters { /** * Create a ClusterUpdateParameters. 
- * @member {object} [tags] The user specified tags associated with the - * Cluster. - * @member {object} [scaleSettings] Desired scale for the cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * @member {object} [scaleSettings] Scale settings. Desired scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. + * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. + * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action + * to be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. + * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. 
+ * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is + * used only during cluster creation. Default: 0. */ constructor() { } @@ -51,20 +58,6 @@ class ClusterUpdateParameters { name: 'Composite', className: 'ClusterUpdateParameters', modelProperties: { - tags: { - required: false, - serializedName: 'tags', - type: { - name: 'Dictionary', - value: { - required: false, - serializedName: 'StringElementType', - type: { - name: 'String' - } - } - } - }, scaleSettings: { required: false, serializedName: 'properties.scaleSettings', diff --git a/lib/services/batchaiManagement/lib/models/containerSettings.js b/lib/services/batchaiManagement/lib/models/containerSettings.js index 7991c38e69..8d1d940238 100644 --- a/lib/services/batchaiManagement/lib/models/containerSettings.js +++ b/lib/services/batchaiManagement/lib/models/containerSettings.js @@ -13,31 +13,39 @@ const models = require('./index'); /** - * Settings for the container to be downloaded. + * Docker container settings. * */ class ContainerSettings { /** * Create a ContainerSettings. - * @member {object} imageSourceRegistry Registry to download the container - * from. - * @member {string} [imageSourceRegistry.serverUrl] - * @member {string} [imageSourceRegistry.image] - * @member {object} [imageSourceRegistry.credentials] - * @member {string} [imageSourceRegistry.credentials.username] - * @member {string} [imageSourceRegistry.credentials.password] One of - * password or passwordSecretReference must be specified. - * @member {object} [imageSourceRegistry.credentials.passwordSecretReference] - * Users can store their secrets in Azure KeyVault and pass it to the Batch - * AI Service to integrate with KeyVault. One of password or + * @member {object} imageSourceRegistry Image source registry. Information + * about docker image and docker registry to download the container from. 
+ * @member {string} [imageSourceRegistry.serverUrl] URL for image repository. + * @member {string} [imageSourceRegistry.image] The name of the image in the + * image repository. + * @member {object} [imageSourceRegistry.credentials] Credentials to access + * the private docker repository. + * @member {string} [imageSourceRegistry.credentials.username] User name to + * login to the repository. + * @member {string} [imageSourceRegistry.credentials.password] User password + * to login to the docker repository. One of password or * passwordSecretReference must be specified. + * @member {object} [imageSourceRegistry.credentials.passwordSecretReference] + * KeyVault Secret storing the password. Users can store their secrets in + * Azure KeyVault and pass it to the Batch AI service to integrate with + * KeyVault. One of password or passwordSecretReference must be specified. * @member {object} * [imageSourceRegistry.credentials.passwordSecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] * The ID of the resource * @member {string} - * [imageSourceRegistry.credentials.passwordSecretReference.secretUrl] + * [imageSourceRegistry.credentials.passwordSecretReference.secretUrl] The + * URL referencing a secret in the Key Vault. + * @member {string} [shmSize] /dev/shm size. Size of /dev/shm. Please refer + * to docker documentation for supported argument formats.
*/ constructor() { } @@ -63,6 +71,13 @@ class ContainerSettings { name: 'Composite', className: 'ImageSourceRegistry' } + }, + shmSize: { + required: false, + serializedName: 'shmSize', + type: { + name: 'String' + } } } } diff --git a/lib/services/batchaiManagement/lib/models/customMpiSettings.js b/lib/services/batchaiManagement/lib/models/customMpiSettings.js index 8ef23879f0..94eea418db 100644 --- a/lib/services/batchaiManagement/lib/models/customMpiSettings.js +++ b/lib/services/batchaiManagement/lib/models/customMpiSettings.js @@ -11,17 +11,17 @@ 'use strict'; /** - * Specifies the settings for a custom tool kit job. + * Custom MPI job settings. * */ class CustomMpiSettings { /** * Create a CustomMpiSettings. - * @member {string} commandLine The program and program command line - * parameters to be executed by mpi runtime. - * @member {number} [processCount] Number of processes parameter that is - * passed to MPI runtime. The default value for this property is equal to - * nodeCount property + * @member {string} commandLine Command line. The command line to be executed + * by mpi runtime on each compute node. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/customToolkitSettings.js b/lib/services/batchaiManagement/lib/models/customToolkitSettings.js index 51e6b5ffa6..bf82ceab11 100644 --- a/lib/services/batchaiManagement/lib/models/customToolkitSettings.js +++ b/lib/services/batchaiManagement/lib/models/customToolkitSettings.js @@ -11,14 +11,14 @@ 'use strict'; /** - * Specifies the settings for a custom tool kit job. + * Custom tool kit job settings. * */ class CustomToolkitSettings { /** * Create a CustomToolkitSettings. - * @member {string} [commandLine] The command line to execute the custom - * toolkit Job. 
+ * @member {string} [commandLine] Command line. The command line to execute + * on the master node. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/dataDisks.js b/lib/services/batchaiManagement/lib/models/dataDisks.js index 4372837723..2592a6580f 100644 --- a/lib/services/batchaiManagement/lib/models/dataDisks.js +++ b/lib/services/batchaiManagement/lib/models/dataDisks.js @@ -11,22 +11,26 @@ 'use strict'; /** - * Settings for the data disk which would be created for the File Server. + * Data disks settings. * */ class DataDisks { /** * Create a DataDisks. - * @member {number} diskSizeInGB Initial disk size in GB for blank data - * disks, and the new desired size for resizing existing data disks. - * @member {string} [cachingType] None, ReadOnly, ReadWrite. Default value is - * None. This property is not patchable. Possible values include: 'none', - * 'readonly', 'readwrite'. Default value: 'none' . - * @member {number} diskCount Number of data disks to be attached to the VM. - * RAID level 0 will be applied in the case of multiple disks. - * @member {string} storageAccountType Specifies the type of storage account - * to be used on the disk. Possible values are: Standard_LRS or Premium_LRS. - * Possible values include: 'Standard_LRS', 'Premium_LRS' + * @member {number} diskSizeInGB Disk size in GB. Disk size in GB for the + * blank data disks. + * @member {string} [cachingType] Caching type. Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite'. Default value: 'none' . + * @member {number} diskCount Number of data disks. Number of data disks + * attached to the File Server. If multiple disks attached, they will be + * configured in RAID level 0. + * @member {string} storageAccountType Storage account type. Type of storage + * account to be used on the disk. 
Possible values are: Standard_LRS or + * Premium_LRS. Premium storage account type can only be used with VM sizes + * supporting premium storage. Possible values include: 'Standard_LRS', + * 'Premium_LRS' */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/environmentVariable.js b/lib/services/batchaiManagement/lib/models/environmentVariable.js index bec29c6ea6..c2533b998d 100644 --- a/lib/services/batchaiManagement/lib/models/environmentVariable.js +++ b/lib/services/batchaiManagement/lib/models/environmentVariable.js @@ -11,14 +11,14 @@ 'use strict'; /** - * A collection of environment variables to set. + * An environment variable definition. * */ class EnvironmentVariable { /** * Create a EnvironmentVariable. - * @member {string} name The name of the environment variable. - * @member {string} value The value of the environment variable. + * @member {string} name Name. The name of the environment variable. + * @member {string} value Value. The value of the environment variable. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/environmentVariableWithSecretValue.js b/lib/services/batchaiManagement/lib/models/environmentVariableWithSecretValue.js index 9559192a7a..5c3c5cc2b8 100644 --- a/lib/services/batchaiManagement/lib/models/environmentVariableWithSecretValue.js +++ b/lib/services/batchaiManagement/lib/models/environmentVariableWithSecretValue.js @@ -13,25 +13,25 @@ const models = require('./index'); /** - * A collection of environment variables with secret values to set. + * An environment variable with secret value definition. * */ class EnvironmentVariableWithSecretValue { /** * Create a EnvironmentVariableWithSecretValue. - * @member {string} name The name of the environment variable to store the - * secret value. - * @member {string} [value] The value of the environment variable. This value - * will never be reported back by Batch AI. 
- * @member {object} [valueSecretReference] Specifies the location of the - * Azure KeyVault secret which will be used as the environment variable - * value. Specifies KeyVault Store and Secret which contains the value for - * the environment variable. One of value or valueSecretReference must be - * provided. - * @member {object} [valueSecretReference.sourceVault] + * @member {string} name Name. The name of the environment variable to store + * the secret value. + * @member {string} [value] Value. The value of the environment variable. + * This value will never be reported back by Batch AI. + * @member {object} [valueSecretReference] KeyVault secret reference. + * KeyVault store and secret which contains the value for the environment + * variable. One of value or valueSecretReference must be provided. + * @member {object} [valueSecretReference.sourceVault] Fully qualified + * resource identifier of the Key Vault. * @member {string} [valueSecretReference.sourceVault.id] The ID of the * resource - * @member {string} [valueSecretReference.secretUrl] + * @member {string} [valueSecretReference.secretUrl] The URL referencing a + * secret in the Key Vault. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/experiment.js b/lib/services/batchaiManagement/lib/models/experiment.js index 68f4e2550f..5a754830d5 100644 --- a/lib/services/batchaiManagement/lib/models/experiment.js +++ b/lib/services/batchaiManagement/lib/models/experiment.js @@ -13,20 +13,21 @@ const models = require('./index'); /** - * Contains information about the experiment. + * Experiment information. * * @extends models['ProxyResource'] */ class Experiment extends models['ProxyResource'] { /** * Create a Experiment. - * @member {date} [creationTime] Time when the Experiment was created. - * @member {string} [provisioningState] The provisioned state of the - * experiment. 
Possible values include: 'creating', 'succeeded', 'failed', - * 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the - * experiment entered its current provisioning state. The time at which the - * experiment entered its current provisioning state. + * @member {date} [creationTime] Creation time. Time when the Experiment was + * created. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the experiment. Possible values include: 'creating', 'succeeded', + * 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the experiment entered its current + * provisioning state. */ constructor() { super(); diff --git a/lib/services/batchaiManagement/lib/models/file.js b/lib/services/batchaiManagement/lib/models/file.js index 7562beec5d..fcf5e05bef 100644 --- a/lib/services/batchaiManagement/lib/models/file.js +++ b/lib/services/batchaiManagement/lib/models/file.js @@ -17,14 +17,14 @@ class File { /** * Create a File. - * @member {string} [name] Name of the file. - * @member {string} [fileType] Contains information about file type. Possible - * values include: 'file', 'directory' - * @member {string} [downloadUrl] Will contain an URL to download the + * @member {string} [name] Name. Name of the file. + * @member {string} [fileType] File type. Type of the file. Possible values + * are file and directory. Possible values include: 'file', 'directory' + * @member {string} [downloadUrl] Download URL. URL to download the * corresponding file. The downloadUrl is not returned for directories. - * @member {date} [lastModified] The time at which the file was last - * modified. The time at which the file was last modified. - * @member {number} [contentLength] The file size. The file size. + * @member {date} [lastModified] Last modified time. The time at which the + * file was last modified. 
+ * @member {number} [contentLength] Content length. The size of the file. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/fileServer.js b/lib/services/batchaiManagement/lib/models/fileServer.js index bad1191a4b..80345efbf9 100644 --- a/lib/services/batchaiManagement/lib/models/fileServer.js +++ b/lib/services/batchaiManagement/lib/models/fileServer.js @@ -13,51 +13,71 @@ const models = require('./index'); /** - * Contains information about the File Server. + * File Server information. * - * @extends models['Resource'] + * @extends models['ProxyResource'] */ -class FileServer extends models['Resource'] { +class FileServer extends models['ProxyResource'] { /** * Create a FileServer. - * @member {string} [vmSize] The size of the virtual machine of the File - * Server. For information about available VM sizes for File Server from the - * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * @member {object} [sshConfiguration] SSH settings for the File Server. - * @member {array} [sshConfiguration.publicIPsToAllow] Default value is '*' - * can be used to match all source IPs. Maximum number of IP ranges that can - * be specified are 400. - * @member {object} [sshConfiguration.userAccountSettings] - * @member {string} [sshConfiguration.userAccountSettings.adminUserName] + * @member {string} [vmSize] VM size. VM size of the File Server. + * @member {object} [sshConfiguration] SSH configuration. SSH configuration + * for accessing the File Server node. + * @member {array} [sshConfiguration.publicIPsToAllow] List of source IP + * ranges to allow SSH connection from. The default value is '*' (all source + * IPs are allowed). Maximum number of IP ranges that can be specified is + * 400. + * @member {object} [sshConfiguration.userAccountSettings] Settings for + * administrator user account to be created on a node. The account can be + * used to establish SSH connection to the node. 
+ * @member {string} [sshConfiguration.userAccountSettings.adminUserName] Name + * of the administrator user account which can be used to SSH to nodes. * @member {string} - * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] + * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH public + * key of the administrator user account. * @member {string} [sshConfiguration.userAccountSettings.adminUserPassword] - * @member {object} [dataDisks] Settings for the data disk which would be - * created for the File Server. - * @member {number} [dataDisks.diskSizeInGB] - * @member {string} [dataDisks.cachingType] Possible values include: 'none', - * 'readonly', 'readwrite' - * @member {number} [dataDisks.diskCount] - * @member {string} [dataDisks.storageAccountType] Possible values include: - * 'Standard_LRS', 'Premium_LRS' - * @member {object} [subnet] Specifies the identifier of the subnet. + * Password of the administrator user account. + * @member {object} [dataDisks] Data disks configuration. Information about + * disks attached to File Server VM. + * @member {number} [dataDisks.diskSizeInGB] Disk size in GB for the blank + * data disks. + * @member {string} [dataDisks.cachingType] Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite' + * @member {number} [dataDisks.diskCount] Number of data disks attached to + * the File Server. If multiple disks attached, they will be configured in + * RAID level 0. + * @member {string} [dataDisks.storageAccountType] Type of storage account to + * be used on the disk. Possible values are: Standard_LRS or Premium_LRS. + * Premium storage account type can only be used with VM sizes supporting + * premium storage. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @member {object} [subnet] Subnet. 
File Server virtual network subnet + * resource ID. * @member {string} [subnet.id] The ID of the resource - * @member {object} [mountSettings] Details of the File Server. - * @member {string} [mountSettings.mountPoint] - * @member {string} [mountSettings.fileServerPublicIP] - * @member {string} [mountSettings.fileServerInternalIP] - * @member {date} [provisioningStateTransitionTime] Time when the status was - * changed. - * @member {date} [creationTime] Time when the FileServer was created. - * @member {string} [provisioningState] Specifies the provisioning state of - * the File Server. Possible values: creating - The File Server is getting - * created. updating - The File Server creation has been accepted and it is - * getting updated. deleting - The user has requested that the File Server be - * deleted, and it is in the process of being deleted. failed - The File - * Server creation has failed with the specified errorCode. Details about the - * error code are specified in the message field. succeeded - The File Server - * creation has succeeded. Possible values include: 'creating', 'updating', - * 'deleting', 'succeeded', 'failed' + * @member {object} [mountSettings] Mount settings. File Server mount + * settings. + * @member {string} [mountSettings.mountPoint] Path where the data disks are + * mounted on the File Server. + * @member {string} [mountSettings.fileServerPublicIP] Public IP address of + * the File Server which can be used to SSH to the node from outside of the + * subnet. + * @member {string} [mountSettings.fileServerInternalIP] Internal IP address + * of the File Server which can be used to access the File Server from within + * the subnet. + * @member {date} [provisioningStateTransitionTime] Provisioning State + * Transition time. Time when the provisioning state was changed. + * @member {date} [creationTime] Creation time. Time when the FileServer was + * created. + * @member {string} [provisioningState] Provisioning state. 
Provisioning + * state of the File Server. Possible values: creating - The File Server is + * getting created; updating - The File Server creation has been accepted and + * it is getting updated; deleting - The user has requested that the File + * Server be deleted, and it is in the process of being deleted; failed - The + * File Server creation has failed with the specified error code. Details + * about the error code are specified in the message field; succeeded - The + * File Server creation has succeeded. Possible values include: 'creating', + * 'updating', 'deleting', 'succeeded', 'failed' */ constructor() { super(); @@ -101,29 +121,6 @@ class FileServer extends models['Resource'] { name: 'String' } }, - location: { - required: false, - readOnly: true, - serializedName: 'location', - type: { - name: 'String' - } - }, - tags: { - required: false, - readOnly: true, - serializedName: 'tags', - type: { - name: 'Dictionary', - value: { - required: false, - serializedName: 'StringElementType', - type: { - name: 'String' - } - } - } - }, vmSize: { required: false, serializedName: 'properties.vmSize', diff --git a/lib/services/batchaiManagement/lib/models/fileServerCreateParameters.js b/lib/services/batchaiManagement/lib/models/fileServerCreateParameters.js index 23e6103b57..1013460ec7 100644 --- a/lib/services/batchaiManagement/lib/models/fileServerCreateParameters.js +++ b/lib/services/batchaiManagement/lib/models/fileServerCreateParameters.js @@ -13,36 +13,49 @@ const models = require('./index'); /** - * Parameters supplied to the Create operation. + * File Server creation parameters. * */ class FileServerCreateParameters { /** * Create a FileServerCreateParameters. - * @member {string} location The region in which to create the File Server. - * @member {object} [tags] The user specified tags associated with the File - * Server. - * @member {string} vmSize The size of the virtual machine of the file - * server. 
For information about available VM sizes for fileservers from the - * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * @member {object} sshConfiguration SSH configuration for the file server. - * @member {array} [sshConfiguration.publicIPsToAllow] Default value is '*' - * can be used to match all source IPs. Maximum number of IP ranges that can - * be specified are 400. - * @member {object} [sshConfiguration.userAccountSettings] - * @member {string} [sshConfiguration.userAccountSettings.adminUserName] + * @member {string} vmSize VM size. The size of the virtual machine for the + * File Server. For information about available VM sizes from the Virtual + * Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @member {object} sshConfiguration SSH configuration. SSH configuration for + * the File Server node. + * @member {array} [sshConfiguration.publicIPsToAllow] List of source IP + * ranges to allow SSH connection from. The default value is '*' (all source + * IPs are allowed). Maximum number of IP ranges that can be specified is + * 400. + * @member {object} [sshConfiguration.userAccountSettings] Settings for + * administrator user account to be created on a node. The account can be + * used to establish SSH connection to the node. + * @member {string} [sshConfiguration.userAccountSettings.adminUserName] Name + * of the administrator user account which can be used to SSH to nodes. * @member {string} - * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] + * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH public + * key of the administrator user account. * @member {string} [sshConfiguration.userAccountSettings.adminUserPassword] - * @member {object} dataDisks Settings for the data disk which would be - * created for the file server. 
- * @member {number} [dataDisks.diskSizeInGB] - * @member {string} [dataDisks.cachingType] Possible values include: 'none', - * 'readonly', 'readwrite' - * @member {number} [dataDisks.diskCount] - * @member {string} [dataDisks.storageAccountType] Possible values include: - * 'Standard_LRS', 'Premium_LRS' - * @member {object} [subnet] Specifies the identifier of the subnet. + * Password of the administrator user account. + * @member {object} dataDisks Data disks. Settings for the data disks which + * will be created for the File Server. + * @member {number} [dataDisks.diskSizeInGB] Disk size in GB for the blank + * data disks. + * @member {string} [dataDisks.cachingType] Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite' + * @member {number} [dataDisks.diskCount] Number of data disks attached to + * the File Server. If multiple disks attached, they will be configured in + * RAID level 0. + * @member {string} [dataDisks.storageAccountType] Type of storage account to + * be used on the disk. Possible values are: Standard_LRS or Premium_LRS. + * Premium storage account type can only be used with VM sizes supporting + * premium storage. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @member {object} [subnet] Subnet identifier. Identifier of an existing + * virtual network subnet to put the File Server in. If not provided, a new + * virtual network and subnet will be created. 
* @member {string} [subnet.id] The ID of the resource */ constructor() { @@ -62,27 +75,6 @@ class FileServerCreateParameters { name: 'Composite', className: 'FileServerCreateParameters', modelProperties: { - location: { - required: true, - serializedName: 'location', - type: { - name: 'String' - } - }, - tags: { - required: false, - serializedName: 'tags', - type: { - name: 'Dictionary', - value: { - required: false, - serializedName: 'StringElementType', - type: { - name: 'String' - } - } - } - }, vmSize: { required: true, serializedName: 'properties.vmSize', diff --git a/lib/services/batchaiManagement/lib/models/fileServerListResult.js b/lib/services/batchaiManagement/lib/models/fileServerListResult.js index c714436697..cbec1e1c5c 100644 --- a/lib/services/batchaiManagement/lib/models/fileServerListResult.js +++ b/lib/services/batchaiManagement/lib/models/fileServerListResult.js @@ -11,7 +11,7 @@ 'use strict'; /** - * Values returned by the List operation. + * Values returned by the File Server List operation. */ class FileServerListResult extends Array { /** diff --git a/lib/services/batchaiManagement/lib/models/fileServerReference.js b/lib/services/batchaiManagement/lib/models/fileServerReference.js index c0e50ec4fb..dff71114e5 100644 --- a/lib/services/batchaiManagement/lib/models/fileServerReference.js +++ b/lib/services/batchaiManagement/lib/models/fileServerReference.js @@ -13,25 +13,25 @@ const models = require('./index'); /** - * Provides required information, for the service to be able to mount Azure - * FileShare on the cluster nodes. + * File Server mounting configuration. * */ class FileServerReference { /** * Create a FileServerReference. - * @member {object} fileServer Reference to the file server resource. + * @member {object} fileServer File server. Resource ID of the existing File + * Server to be mounted. 
* @member {string} [fileServer.id] The ID of the resource - * @member {string} [sourceDirectory] Specifies the source directory in File - * Server that needs to be mounted. If this property is not specified, the - * entire File Server will be mounted. - * @member {string} relativeMountPath Specifies the relative path on the - * compute node where the File Server will be mounted. Note that all cluster - * level file servers will be mounted under $AZ_BATCHAI_MOUNT_ROOT location - * and job level file servers will be mouted under + * @member {string} [sourceDirectory] Source directory. File Server directory + * that needs to be mounted. If this property is not specified, the entire + * File Server will be mounted. + * @member {string} relativeMountPath Relative mount path. The relative path + * on the compute node where the File Server will be mounted. Note that all + * cluster level file servers will be mounted under $AZ_BATCHAI_MOUNT_ROOT + * location and all job level file servers will be mounted under * $AZ_BATCHAI_JOB_MOUNT_ROOT. - * @member {string} [mountOptions] Specifies the mount options for File - * Server. + * @member {string} [mountOptions] Mount options. Mount options to be passed + * to mount command. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/horovodSettings.js b/lib/services/batchaiManagement/lib/models/horovodSettings.js index 55fb45058b..527b2d63ad 100644 --- a/lib/services/batchaiManagement/lib/models/horovodSettings.js +++ b/lib/services/batchaiManagement/lib/models/horovodSettings.js @@ -11,20 +11,21 @@ 'use strict'; /** - * Specifies the settings for Chainer job. + * Specifies the settings for Horovod job. * */ class HorovodSettings { /** * Create a HorovodSettings. - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. 
- * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. - * @member {number} [processCount] Number of processes parameter that is - * passed to MPI runtime. The default value for this property is equal to - * nodeCount property + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/imageReference.js b/lib/services/batchaiManagement/lib/models/imageReference.js index 70e0564425..54fc7e7a4b 100644 --- a/lib/services/batchaiManagement/lib/models/imageReference.js +++ b/lib/services/batchaiManagement/lib/models/imageReference.js @@ -11,19 +11,19 @@ 'use strict'; /** - * The image reference. + * The OS image reference. * */ class ImageReference { /** * Create a ImageReference. - * @member {string} publisher Publisher of the image. - * @member {string} offer Offer of the image. - * @member {string} sku SKU of the image. - * @member {string} [version] Version of the image. - * @member {string} [virtualMachineImageId] The ARM resource identifier of - * the virtual machine image. Computes nodes of the cluster will be created - * using this custom image. This is of the form + * @member {string} publisher Publisher. Publisher of the image. + * @member {string} offer Offer. Offer of the image. + * @member {string} sku SKU. SKU of the image. + * @member {string} [version] Version. Version of the image. + * @member {string} [virtualMachineImageId] Custom VM image resource ID. 
The + * ARM resource identifier of the virtual machine image for the compute + * nodes. This is of the form * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as * the cluster. For information about the firewall settings for the Batch diff --git a/lib/services/batchaiManagement/lib/models/imageSourceRegistry.js b/lib/services/batchaiManagement/lib/models/imageSourceRegistry.js index 5976ee907f..313222e43d 100644 --- a/lib/services/batchaiManagement/lib/models/imageSourceRegistry.js +++ b/lib/services/batchaiManagement/lib/models/imageSourceRegistry.js @@ -13,27 +13,32 @@ const models = require('./index'); /** - * Details of the container image such as name, URL and credentials. + * Information about docker image for the job. * */ class ImageSourceRegistry { /** * Create a ImageSourceRegistry. - * @member {string} [serverUrl] URL for image repository. - * @member {string} image The name of the image in image repository. - * @member {object} [credentials] Information to access the private Docker + * @member {string} [serverUrl] Server URL. URL for image repository. + * @member {string} image Image. The name of the image in the image * repository. - * @member {string} [credentials.username] - * @member {string} [credentials.password] One of password or - * passwordSecretReference must be specified. - * @member {object} [credentials.passwordSecretReference] Users can store - * their secrets in Azure KeyVault and pass it to the Batch AI Service to - * integrate with KeyVault. One of password or passwordSecretReference must - * be specified. - * @member {object} [credentials.passwordSecretReference.sourceVault] + * @member {object} [credentials] Credentials. Credentials to access the + * private docker repository. + * @member {string} [credentials.username] User name to login to the + * repository. 
+ * @member {string} [credentials.password] User password to login to the + * docker repository. One of password or passwordSecretReference must be + * specified. + * @member {object} [credentials.passwordSecretReference] KeyVault Secret + * storing the password. Users can store their secrets in Azure KeyVault and + * pass it to the Batch AI service to integrate with KeyVault. One of + * password or passwordSecretReference must be specified. + * @member {object} [credentials.passwordSecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} [credentials.passwordSecretReference.sourceVault.id] The * ID of the resource - * @member {string} [credentials.passwordSecretReference.secretUrl] + * @member {string} [credentials.passwordSecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/index.d.ts b/lib/services/batchaiManagement/lib/models/index.d.ts index e64693f7db..8f84a42c28 100644 --- a/lib/services/batchaiManagement/lib/models/index.d.ts +++ b/lib/services/batchaiManagement/lib/models/index.d.ts @@ -58,13 +58,12 @@ export interface Usage { * Settings for user account that gets created on each on the nodes of a * cluster. * - * @member {string} adminUserName Specifies the name of the administrator - * account. - * @member {string} [adminUserSshPublicKey] SSH public keys used to - * authenticate with linux based VMs. This does not get returned in a GET - * response body. - * @member {string} [adminUserPassword] Admin user Password (linux only). This - * does not get returned in a GET response body. + * @member {string} adminUserName User name. Name of the administrator user + * account which can be used to SSH to nodes. + * @member {string} [adminUserSshPublicKey] SSH public key. SSH public key of + * the administrator user account. + * @member {string} [adminUserPassword] Password. 
Password of the administrator + * user account. */ export interface UserAccountSettings { adminUserName: string; @@ -76,16 +75,20 @@ export interface UserAccountSettings { * @class * Initializes a new instance of the SshConfiguration class. * @constructor - * SSH configuration settings for the VM - * - * @member {array} [publicIPsToAllow] List of source IP ranges to allow SSH - * connection to a node. Default value is '*' can be used to match all source - * IPs. Maximum number of IP ranges that can be specified are 400. - * @member {object} userAccountSettings Settings for user account to be created - * on a node. - * @member {string} [userAccountSettings.adminUserName] - * @member {string} [userAccountSettings.adminUserSshPublicKey] - * @member {string} [userAccountSettings.adminUserPassword] + * SSH configuration. + * + * @member {array} [publicIPsToAllow] Allowed public IPs. List of source IP + * ranges to allow SSH connection from. The default value is '*' (all source + * IPs are allowed). Maximum number of IP ranges that can be specified is 400. + * @member {object} userAccountSettings User account settings. Settings for + * administrator user account to be created on a node. The account can be used + * to establish SSH connection to the node. + * @member {string} [userAccountSettings.adminUserName] Name of the + * administrator user account which can be used to SSH to nodes. + * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public key + * of the administrator user account. + * @member {string} [userAccountSettings.adminUserPassword] Password of the + * administrator user account. */ export interface SshConfiguration { publicIPsToAllow?: string[]; @@ -96,18 +99,22 @@ export interface SshConfiguration { * @class * Initializes a new instance of the DataDisks class. * @constructor - * Settings for the data disk which would be created for the File Server. 
- * - * @member {number} diskSizeInGB Initial disk size in GB for blank data disks, - * and the new desired size for resizing existing data disks. - * @member {string} [cachingType] None, ReadOnly, ReadWrite. Default value is - * None. This property is not patchable. Possible values include: 'none', - * 'readonly', 'readwrite'. Default value: 'none' . - * @member {number} diskCount Number of data disks to be attached to the VM. - * RAID level 0 will be applied in the case of multiple disks. - * @member {string} storageAccountType Specifies the type of storage account to - * be used on the disk. Possible values are: Standard_LRS or Premium_LRS. - * Possible values include: 'Standard_LRS', 'Premium_LRS' + * Data disks settings. + * + * @member {number} diskSizeInGB Disk size in GB. Disk size in GB for the blank + * data disks. + * @member {string} [cachingType] Caching type. Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite'. Default value: 'none' . + * @member {number} diskCount Number of data disks. Number of data disks + * attached to the File Server. If multiple disks attached, they will be + * configured in RAID level 0. + * @member {string} storageAccountType Storage account type. Type of storage + * account to be used on the disk. Possible values are: Standard_LRS or + * Premium_LRS. Premium storage account type can only be used with VM sizes + * supporting premium storage. Possible values include: 'Standard_LRS', + * 'Premium_LRS' */ export interface DataDisks { diskSizeInGB: number; @@ -133,12 +140,15 @@ export interface ResourceId extends BaseResource { * @class * Initializes a new instance of the MountSettings class. * @constructor - * Details of the File Server. + * File Server mount Information. * - * @member {string} [mountPoint] Path where the NFS is mounted on the Server. 
- * @member {string} [fileServerPublicIP] Public IP of the File Server VM. - * @member {string} [fileServerInternalIP] Internal subnet IP which can be used - * to access the file Server from within the subnet. + * @member {string} [mountPoint] Mount Point. Path where the data disks are + * mounted on the File Server. + * @member {string} [fileServerPublicIP] Public IP. Public IP address of the + * File Server which can be used to SSH to the node from outside of the subnet. + * @member {string} [fileServerInternalIP] Internal IP. Internal IP address of + * the File Server which can be used to access the File Server from within the + * subnet. */ export interface MountSettings { mountPoint?: string; @@ -148,70 +158,83 @@ export interface MountSettings { /** * @class - * Initializes a new instance of the Resource class. + * Initializes a new instance of the ProxyResource class. * @constructor - * A definition of an Azure resource. + * A definition of an Azure proxy resource. * - * @member {string} [id] The ID of the resource - * @member {string} [name] The name of the resource - * @member {string} [type] The type of the resource - * @member {string} [location] The location of the resource - * @member {object} [tags] The tags of the resource + * @member {string} [id] The ID of the resource. + * @member {string} [name] The name of the resource. + * @member {string} [type] The type of the resource. */ -export interface Resource extends BaseResource { +export interface ProxyResource extends BaseResource { readonly id?: string; readonly name?: string; readonly type?: string; - readonly location?: string; - readonly tags?: { [propertyName: string]: string }; } /** * @class * Initializes a new instance of the FileServer class. * @constructor - * Contains information about the File Server. - * - * @member {string} [vmSize] The size of the virtual machine of the File - * Server. 
For information about available VM sizes for File Server from the - * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * @member {object} [sshConfiguration] SSH settings for the File Server. - * @member {array} [sshConfiguration.publicIPsToAllow] Default value is '*' can - * be used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * @member {object} [sshConfiguration.userAccountSettings] - * @member {string} [sshConfiguration.userAccountSettings.adminUserName] + * File Server information. + * + * @member {string} [vmSize] VM size. VM size of the File Server. + * @member {object} [sshConfiguration] SSH configuration. SSH configuration for + * accessing the File Server node. + * @member {array} [sshConfiguration.publicIPsToAllow] List of source IP ranges + * to allow SSH connection from. The default value is '*' (all source IPs are + * allowed). Maximum number of IP ranges that can be specified is 400. + * @member {object} [sshConfiguration.userAccountSettings] Settings for + * administrator user account to be created on a node. The account can be used + * to establish SSH connection to the node. + * @member {string} [sshConfiguration.userAccountSettings.adminUserName] Name + * of the administrator user account which can be used to SSH to nodes. * @member {string} - * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] + * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH public key + * of the administrator user account. * @member {string} [sshConfiguration.userAccountSettings.adminUserPassword] - * @member {object} [dataDisks] Settings for the data disk which would be - * created for the File Server. 
- * @member {number} [dataDisks.diskSizeInGB] - * @member {string} [dataDisks.cachingType] Possible values include: 'none', - * 'readonly', 'readwrite' - * @member {number} [dataDisks.diskCount] - * @member {string} [dataDisks.storageAccountType] Possible values include: - * 'Standard_LRS', 'Premium_LRS' - * @member {object} [subnet] Specifies the identifier of the subnet. + * Password of the administrator user account. + * @member {object} [dataDisks] Data disks configuration. Information about + * disks attached to File Server VM. + * @member {number} [dataDisks.diskSizeInGB] Disk size in GB for the blank data + * disks. + * @member {string} [dataDisks.cachingType] Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite' + * @member {number} [dataDisks.diskCount] Number of data disks attached to the + * File Server. If multiple disks attached, they will be configured in RAID + * level 0. + * @member {string} [dataDisks.storageAccountType] Type of storage account to + * be used on the disk. Possible values are: Standard_LRS or Premium_LRS. + * Premium storage account type can only be used with VM sizes supporting + * premium storage. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @member {object} [subnet] Subnet. File Server virtual network subnet + * resource ID. * @member {string} [subnet.id] The ID of the resource - * @member {object} [mountSettings] Details of the File Server. - * @member {string} [mountSettings.mountPoint] - * @member {string} [mountSettings.fileServerPublicIP] - * @member {string} [mountSettings.fileServerInternalIP] - * @member {date} [provisioningStateTransitionTime] Time when the status was - * changed. - * @member {date} [creationTime] Time when the FileServer was created. 
- * @member {string} [provisioningState] Specifies the provisioning state of the - * File Server. Possible values: creating - The File Server is getting created. - * updating - The File Server creation has been accepted and it is getting - * updated. deleting - The user has requested that the File Server be deleted, - * and it is in the process of being deleted. failed - The File Server creation - * has failed with the specified errorCode. Details about the error code are - * specified in the message field. succeeded - The File Server creation has - * succeeded. Possible values include: 'creating', 'updating', 'deleting', - * 'succeeded', 'failed' - */ -export interface FileServer extends Resource { + * @member {object} [mountSettings] Mount settings. File Server mount settings. + * @member {string} [mountSettings.mountPoint] Path where the data disks are + * mounted on the File Server. + * @member {string} [mountSettings.fileServerPublicIP] Public IP address of the + * File Server which can be used to SSH to the node from outside of the subnet. + * @member {string} [mountSettings.fileServerInternalIP] Internal IP address of + * the File Server which can be used to access the File Server from within the + * subnet. + * @member {date} [provisioningStateTransitionTime] Provisioning State + * Transition time. Time when the provisioning state was changed. + * @member {date} [creationTime] Creation time. Time when the FileServer was + * created. + * @member {string} [provisioningState] Provisioning state. Provisioning state + * of the File Server. Possible values: creating - The File Server is getting + * created; updating - The File Server creation has been accepted and it is + * getting updated; deleting - The user has requested that the File Server be + * deleted, and it is in the process of being deleted; failed - The File Server + * creation has failed with the specified error code. 
Details about the error + * code are specified in the message field; succeeded - The File Server + * creation has succeeded. Possible values include: 'creating', 'updating', + * 'deleting', 'succeeded', 'failed' + */ +export interface FileServer extends ProxyResource { vmSize?: string; sshConfiguration?: SshConfiguration; dataDisks?: DataDisks; @@ -226,11 +249,13 @@ export interface FileServer extends Resource { * @class * Initializes a new instance of the KeyVaultSecretReference class. * @constructor - * Describes a reference to Key Vault Secret. + * Key Vault Secret reference. * - * @member {object} sourceVault Fully qualified resource Id for the Key Vault. + * @member {object} sourceVault Key Vault resource identifier. Fully qualified + * resource indentifier of the Key Vault. * @member {string} [sourceVault.id] The ID of the resource - * @member {string} secretUrl The URL referencing a secret in a Key Vault. + * @member {string} secretUrl Secret URL. The URL referencing a secret in the + * Key Vault. */ export interface KeyVaultSecretReference { sourceVault: ResourceId; @@ -241,37 +266,47 @@ export interface KeyVaultSecretReference { * @class * Initializes a new instance of the FileServerCreateParameters class. * @constructor - * Parameters supplied to the Create operation. + * File Server creation parameters. * - * @member {string} location The region in which to create the File Server. - * @member {object} [tags] The user specified tags associated with the File - * Server. - * @member {string} vmSize The size of the virtual machine of the file server. - * For information about available VM sizes for fileservers from the Virtual + * @member {string} vmSize VM size. The size of the virtual machine for the + * File Server. For information about available VM sizes from the Virtual * Machines Marketplace, see Sizes for Virtual Machines (Linux). - * @member {object} sshConfiguration SSH configuration for the file server. 
- * @member {array} [sshConfiguration.publicIPsToAllow] Default value is '*' can - * be used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * @member {object} [sshConfiguration.userAccountSettings] - * @member {string} [sshConfiguration.userAccountSettings.adminUserName] + * @member {object} sshConfiguration SSH configuration. SSH configuration for + * the File Server node. + * @member {array} [sshConfiguration.publicIPsToAllow] List of source IP ranges + * to allow SSH connection from. The default value is '*' (all source IPs are + * allowed). Maximum number of IP ranges that can be specified is 400. + * @member {object} [sshConfiguration.userAccountSettings] Settings for + * administrator user account to be created on a node. The account can be used + * to establish SSH connection to the node. + * @member {string} [sshConfiguration.userAccountSettings.adminUserName] Name + * of the administrator user account which can be used to SSH to nodes. * @member {string} - * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] + * [sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH public key + * of the administrator user account. * @member {string} [sshConfiguration.userAccountSettings.adminUserPassword] - * @member {object} dataDisks Settings for the data disk which would be created - * for the file server. - * @member {number} [dataDisks.diskSizeInGB] - * @member {string} [dataDisks.cachingType] Possible values include: 'none', - * 'readonly', 'readwrite' - * @member {number} [dataDisks.diskCount] - * @member {string} [dataDisks.storageAccountType] Possible values include: - * 'Standard_LRS', 'Premium_LRS' - * @member {object} [subnet] Specifies the identifier of the subnet. + * Password of the administrator user account. + * @member {object} dataDisks Data disks. Settings for the data disks which + * will be created for the File Server. 
+ * @member {number} [dataDisks.diskSizeInGB] Disk size in GB for the blank data + * disks. + * @member {string} [dataDisks.cachingType] Caching type for the disks. + * Available values are none (default), readonly, readwrite. Caching type can + * be set only for VM sizes supporting premium storage. Possible values + * include: 'none', 'readonly', 'readwrite' + * @member {number} [dataDisks.diskCount] Number of data disks attached to the + * File Server. If multiple disks attached, they will be configured in RAID + * level 0. + * @member {string} [dataDisks.storageAccountType] Type of storage account to + * be used on the disk. Possible values are: Standard_LRS or Premium_LRS. + * Premium storage account type can only be used with VM sizes supporting + * premium storage. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @member {object} [subnet] Subnet identifier. Identifier of an existing + * virtual network subnet to put the File Server in. If not provided, a new + * virtual network and subnet will be created. * @member {string} [subnet.id] The ID of the resource */ export interface FileServerCreateParameters { - location: string; - tags?: { [propertyName: string]: string }; vmSize: string; sshConfiguration: SshConfiguration; dataDisks: DataDisks; @@ -284,12 +319,11 @@ export interface FileServerCreateParameters { * @constructor * Manual scale settings for the cluster. * - * @member {number} targetNodeCount The desired number of compute nodes in the - * Cluster. Default is 0. If autoScaleSettings are not specified, then the - * Cluster starts with this target. Default value: 0 . - * @member {string} [nodeDeallocationOption] Determines what to do with the - * job(s) running on compute node if the Cluster size is decreasing. The - * default value is requeue. Possible values include: 'requeue', 'terminate', + * @member {number} targetNodeCount Target node count. The desired number of + * compute nodes in the Cluster. Default is 0. Default value: 0 . 
+ * @member {string} [nodeDeallocationOption] Node deallocation options. An + * action to be performed when the cluster size is decreasing. The default + * value is requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion'. Default value: 'requeue' . */ export interface ManualScaleSettings { @@ -301,17 +335,19 @@ export interface ManualScaleSettings { * @class * Initializes a new instance of the AutoScaleSettings class. * @constructor - * The system automatically scales the cluster up and down (within - * minimumNodeCount and maximumNodeCount) based on the pending and running jobs - * on the cluster. + * Auto-scale settings for the cluster. The system automatically scales the + * cluster up and down (within minimumNodeCount and maximumNodeCount) based on + * the number of queued and running jobs assigned to the cluster. * - * @member {number} minimumNodeCount Specifies the minimum number of compute - * nodes the cluster can have. - * @member {number} maximumNodeCount Specifies the maximum number of compute - * nodes the cluster can have. - * @member {number} [initialNodeCount] Specifies the number of compute nodes to - * allocate on cluster creation. Note that this value is used only during - * cluster creation. Default value: 0 . + * @member {number} minimumNodeCount Minimum node count. The minimum number of + * compute nodes the Batch AI service will try to allocate for the cluster. + * Note, the actual number of nodes can be less than the specified value if the + * subscription has not enough quota to fulfill the request. + * @member {number} maximumNodeCount Maximum node count. The maximum number of + * compute nodes the cluster can have. + * @member {number} [initialNodeCount] Initial node count. The number of + * compute nodes to allocate on cluster creation. Note that this value is used + * only during cluster creation. Default: 0. Default value: 0 . 
*/ export interface AutoScaleSettings { minimumNodeCount: number; @@ -328,17 +364,24 @@ export interface AutoScaleSettings { * specified, the system automatically scales the cluster up and down (within * the supplied limits) based on the pending jobs on the cluster. * - * @member {object} [manual] The scale for the cluster by manual settings. - * @member {number} [manual.targetNodeCount] Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * @member {string} [manual.nodeDeallocationOption] The default value is - * requeue. Possible values include: 'requeue', 'terminate', - * 'waitforjobcompletion' - * @member {object} [autoScale] The scale for the cluster by autoscale - * settings. - * @member {number} [autoScale.minimumNodeCount] - * @member {number} [autoScale.maximumNodeCount] - * @member {number} [autoScale.initialNodeCount] + * @member {object} [manual] Manual scale settings. Manual scale settings for + * the cluster. + * @member {number} [manual.targetNodeCount] The desired number of compute + * nodes in the Cluster. Default is 0. + * @member {string} [manual.nodeDeallocationOption] An action to be performed + * when the cluster size is decreasing. The default value is requeue. Possible + * values include: 'requeue', 'terminate', 'waitforjobcompletion' + * @member {object} [autoScale] Auto-scale settings. Auto-scale settings for + * the cluster. + * @member {number} [autoScale.minimumNodeCount] The minimum number of compute + * nodes the Batch AI service will try to allocate for the cluster. Note, the + * actual number of nodes can be less than the specified value if the + * subscription has not enough quota to fulfill the request. + * @member {number} [autoScale.maximumNodeCount] The maximum number of compute + * nodes the cluster can have. + * @member {number} [autoScale.initialNodeCount] The number of compute nodes to + * allocate on cluster creation. 
Note that this value is used only during + * cluster creation. Default: 0. */ export interface ScaleSettings { manual?: ManualScaleSettings; @@ -349,15 +392,15 @@ export interface ScaleSettings { * @class * Initializes a new instance of the ImageReference class. * @constructor - * The image reference. + * The OS image reference. * - * @member {string} publisher Publisher of the image. - * @member {string} offer Offer of the image. - * @member {string} sku SKU of the image. - * @member {string} [version] Version of the image. - * @member {string} [virtualMachineImageId] The ARM resource identifier of the - * virtual machine image. Computes nodes of the cluster will be created using - * this custom image. This is of the form + * @member {string} publisher Publisher. Publisher of the image. + * @member {string} offer Offer. Offer of the image. + * @member {string} sku SKU. SKU of the image. + * @member {string} [version] Version. Version of the image. + * @member {string} [virtualMachineImageId] Custom VM image resource ID. The + * ARM resource identifier of the virtual machine image for the compute nodes. + * This is of the form * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node @@ -378,17 +421,21 @@ export interface ImageReference { * @class * Initializes a new instance of the VirtualMachineConfiguration class. * @constructor - * Settings for OS image. - * - * @member {object} [imageReference] Reference to OS image. - * @member {string} [imageReference.publisher] - * @member {string} [imageReference.offer] - * @member {string} [imageReference.sku] - * @member {string} [imageReference.version] - * @member {string} [imageReference.virtualMachineImageId] The virtual machine - * image must be in the same region and subscription as the cluster. 
For - * information about the firewall settings for the Batch node agent to - * communicate with the Batch service see + * VM configuration. + * + * @member {object} [imageReference] Image reference. OS image reference for + * cluster nodes. + * @member {string} [imageReference.publisher] Publisher of the image. + * @member {string} [imageReference.offer] Offer of the image. + * @member {string} [imageReference.sku] SKU of the image. + * @member {string} [imageReference.version] Version of the image. + * @member {string} [imageReference.virtualMachineImageId] The ARM resource + * identifier of the virtual machine image for the compute nodes. This is of + * the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. @@ -401,10 +448,10 @@ export interface VirtualMachineConfiguration { * @class * Initializes a new instance of the EnvironmentVariable class. * @constructor - * A collection of environment variables to set. + * An environment variable definition. * - * @member {string} name The name of the environment variable. - * @member {string} value The value of the environment variable. + * @member {string} name Name. The name of the environment variable. + * @member {string} value Value. The value of the environment variable. */ export interface EnvironmentVariable { name: string; @@ -415,20 +462,21 @@ export interface EnvironmentVariable { * @class * Initializes a new instance of the EnvironmentVariableWithSecretValue class. 
* @constructor - * A collection of environment variables with secret values to set. - * - * @member {string} name The name of the environment variable to store the - * secret value. - * @member {string} [value] The value of the environment variable. This value - * will never be reported back by Batch AI. - * @member {object} [valueSecretReference] Specifies the location of the Azure - * KeyVault secret which will be used as the environment variable value. - * Specifies KeyVault Store and Secret which contains the value for the - * environment variable. One of value or valueSecretReference must be provided. - * @member {object} [valueSecretReference.sourceVault] + * An environment variable with secret value definition. + * + * @member {string} name Name. The name of the environment variable to store + * the secret value. + * @member {string} [value] Value. The value of the environment variable. This + * value will never be reported back by Batch AI. + * @member {object} [valueSecretReference] KeyVault secret reference. KeyVault + * store and secret which contains the value for the environment variable. One + * of value or valueSecretReference must be provided. + * @member {object} [valueSecretReference.sourceVault] Fully qualified resource + * indentifier of the Key Vault. * @member {string} [valueSecretReference.sourceVault.id] The ID of the * resource - * @member {string} [valueSecretReference.secretUrl] + * @member {string} [valueSecretReference.secretUrl] The URL referencing a + * secret in the Key Vault. */ export interface EnvironmentVariableWithSecretValue { name: string; @@ -443,23 +491,23 @@ export interface EnvironmentVariableWithSecretValue { * Specifies a setup task which can be used to customize the compute nodes of * the cluster. * - * @member {string} commandLine Command line to be executed on each cluster's - * node after it being allocated or rebooted. Command line to be executed on - * each cluster's node after it being allocated or rebooted. 
The command is + * @member {string} commandLine Command line. The command line to be executed + * on each cluster's node after it being allocated or rebooted. The command is * executed in a bash subshell as a root. - * @member {array} [environmentVariables] Collection of environment variables - * to be set for setup task. - * @member {array} [secrets] Collection of environment variables with secret - * values to be set for setup task. Server will never report values of these - * variables back. - * @member {string} stdOutErrPathPrefix The prefix of a path where the Batch AI - * service will upload the stdout and stderr of the setup task. - * @member {string} [stdOutErrPathSuffix] A path segment appended by Batch AI - * to stdOutErrPathPrefix to form a path where stdout and stderr of the setup - * task will be uploaded. Batch AI creates the setup task output directories - * under an unique path to avoid conflicts between different clusters. You can - * concatinate stdOutErrPathPrefix and stdOutErrPathSuffix to get the full path - * to the output directory. + * @member {array} [environmentVariables] Environment variables. A collection + * of user defined environment variables to be set for setup task. + * @member {array} [secrets] Secrets. A collection of user defined environment + * variables with secret values to be set for the setup task. Server will never + * report values of these variables back. + * @member {string} stdOutErrPathPrefix Output path prefix. The prefix of a + * path where the Batch AI service will upload the stdout, stderr and execution + * log of the setup task. + * @member {string} [stdOutErrPathSuffix] Output path suffix. A path segment + * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout, + * stderr and execution log of the setup task will be uploaded. Batch AI + * creates the setup task output directories under an unique path to avoid + * conflicts between different clusters. 
The full path can be obtained by + * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix. */ export interface SetupTask { commandLine: string; @@ -473,19 +521,19 @@ export interface SetupTask { * @class * Initializes a new instance of the AzureStorageCredentialsInfo class. * @constructor - * Credentials to access Azure File Share. + * Azure storage account credentials. * - * @member {string} [accountKey] Storage account key. One of accountKey or - * accountKeySecretReference must be specified. - * @member {object} [accountKeySecretReference] Specifies the location of the - * storage account key, which is a Key Vault Secret. Users can store their - * secrets in Azure KeyVault and pass it to the Batch AI Service to integrate - * with KeyVault. One of accountKey or accountKeySecretReference must be - * specified. - * @member {object} [accountKeySecretReference.sourceVault] + * @member {string} [accountKey] Account key. Storage account key. One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [accountKeySecretReference] Account key secret reference. + * Information about KeyVault secret storing the storage account key. One of + * accountKey or accountKeySecretReference must be specified. + * @member {object} [accountKeySecretReference.sourceVault] Fully qualified + * resource indentifier of the Key Vault. * @member {string} [accountKeySecretReference.sourceVault.id] The ID of the * resource - * @member {string} [accountKeySecretReference.secretUrl] + * @member {string} [accountKeySecretReference.secretUrl] The URL referencing a + * secret in the Key Vault. */ export interface AzureStorageCredentialsInfo { accountKey?: string; @@ -496,30 +544,33 @@ export interface AzureStorageCredentialsInfo { * @class * Initializes a new instance of the AzureFileShareReference class. * @constructor - * Details of the Azure File Share to mount on the cluster. + * Azure File Share mounting configuration. 
  *
- * @member {string} accountName Name of the storage account.
- * @member {string} azureFileUrl URL to access the Azure File.
- * @member {object} credentials Information of the Azure File credentials.
- * @member {string} [credentials.accountKey] One of accountKey or
+ * @member {string} accountName Account name. Name of the Azure storage
+ * account.
+ * @member {string} azureFileUrl Azure File URL. URL to access the Azure File.
+ * @member {object} credentials Credentials. Information about the Azure
+ * storage credentials.
+ * @member {string} [credentials.accountKey] Storage account key. One of
+ * accountKey or accountKeySecretReference must be specified.
+ * @member {object} [credentials.accountKeySecretReference] Information about
+ * KeyVault secret storing the storage account key. One of accountKey or
  * accountKeySecretReference must be specified.
- * @member {object} [credentials.accountKeySecretReference] Users can store
- * their secrets in Azure KeyVault and pass it to the Batch AI Service to
- * integrate with KeyVault. One of accountKey or accountKeySecretReference must
- * be specified.
- * @member {object} [credentials.accountKeySecretReference.sourceVault]
+ * @member {object} [credentials.accountKeySecretReference.sourceVault] Fully
+ * qualified resource identifier of the Key Vault.
  * @member {string} [credentials.accountKeySecretReference.sourceVault.id] The
  * ID of the resource
- * @member {string} [credentials.accountKeySecretReference.secretUrl]
- * @member {string} relativeMountPath Specifies the relative path on the
- * compute node where the Azure file share will be mounted. Note that all
+ * @member {string} [credentials.accountKeySecretReference.secretUrl] The URL
+ * referencing a secret in the Key Vault.
+ * @member {string} relativeMountPath Relative mount path. The relative path on
+ * the compute node where the Azure File share will be mounted.
Note that all * cluster level file shares will be mounted under $AZ_BATCHAI_MOUNT_ROOT * location and all job level file shares will be mounted under * $AZ_BATCHAI_JOB_MOUNT_ROOT. - * @member {string} [fileMode] Specifies the file mode. Default value is 0777. - * Valid only if OS is linux. Default value: '0777' . - * @member {string} [directoryMode] Specifies the directory Mode. Default value - * is 0777. Valid only if OS is linux. Default value: '0777' . + * @member {string} [fileMode] File mode. File mode for files on the mounted + * file share. Default value: 0777. Default value: '0777' . + * @member {string} [directoryMode] Directory mode. File mode for directories + * on the mounted file share. Default value: 0777. Default value: '0777' . */ export interface AzureFileShareReference { accountName: string; @@ -534,31 +585,32 @@ export interface AzureFileShareReference { * @class * Initializes a new instance of the AzureBlobFileSystemReference class. * @constructor - * Provides required information, for the service to be able to mount Azure - * Blob Storage container on the cluster nodes. + * Azure Blob Storage Container mounting configuration. * - * @member {string} accountName Name of the Azure Blob Storage account. - * @member {string} containerName Name of the Azure Blob Storage container to - * mount on the cluster. - * @member {object} credentials Information of the Azure Blob Storage account - * credentials. - * @member {string} [credentials.accountKey] One of accountKey or + * @member {string} accountName Account name. Name of the Azure storage + * account. + * @member {string} containerName Container name. Name of the Azure Blob + * Storage container to mount on the cluster. + * @member {object} credentials Credentials. Information about the Azure + * storage credentials. + * @member {string} [credentials.accountKey] Storage account key. One of + * accountKey or accountKeySecretReference must be specified. 
+ * @member {object} [credentials.accountKeySecretReference] Information about
+ * KeyVault secret storing the storage account key. One of accountKey or
  * accountKeySecretReference must be specified.
- * @member {object} [credentials.accountKeySecretReference.sourceVault]
+ * @member {object} [credentials.accountKeySecretReference.sourceVault] Fully
+ * qualified resource identifier of the Key Vault.
  * @member {string} [credentials.accountKeySecretReference.sourceVault.id] The
  * ID of the resource
- * @member {string} [credentials.accountKeySecretReference.secretUrl]
- * @member {string} relativeMountPath Specifies the relative path on the
- * compute node where the Azure Blob file system will be mounted. Note that all
- * cluster level blob file systems will be mounted under $AZ_BATCHAI_MOUNT_ROOT
- * location and all job level blob file systems will be mounted under
+ * @member {string} [credentials.accountKeySecretReference.secretUrl] The URL
+ * referencing a secret in the Key Vault.
+ * @member {string} relativeMountPath Relative mount path. The relative path on
+ * the compute node where the Azure File container will be mounted. Note that
+ * all cluster level containers will be mounted under $AZ_BATCHAI_MOUNT_ROOT
+ * location and all job level containers will be mounted under
  * $AZ_BATCHAI_JOB_MOUNT_ROOT.
- * @member {string} [mountOptions] Specifies the various mount options that can
- * be used to configure Blob file system.
+ * @member {string} [mountOptions] Mount options. Mount options for mounting
+ * blobfuse file system.
*/ export interface AzureBlobFileSystemReference { accountName: string; @@ -572,19 +624,21 @@ export interface AzureBlobFileSystemReference { * @class * Initializes a new instance of the FileServerReference class. * @constructor - * Provides required information, for the service to be able to mount Azure - * FileShare on the cluster nodes. + * File Server mounting configuration. * - * @member {object} fileServer Reference to the file server resource. + * @member {object} fileServer File server. Resource ID of the existing File + * Server to be mounted. * @member {string} [fileServer.id] The ID of the resource - * @member {string} [sourceDirectory] Specifies the source directory in File - * Server that needs to be mounted. If this property is not specified, the - * entire File Server will be mounted. - * @member {string} relativeMountPath Specifies the relative path on the - * compute node where the File Server will be mounted. Note that all cluster - * level file servers will be mounted under $AZ_BATCHAI_MOUNT_ROOT location and - * job level file servers will be mouted under $AZ_BATCHAI_JOB_MOUNT_ROOT. - * @member {string} [mountOptions] Specifies the mount options for File Server. + * @member {string} [sourceDirectory] Source directory. File Server directory + * that needs to be mounted. If this property is not specified, the entire File + * Server will be mounted. + * @member {string} relativeMountPath Relative mount path. The relative path on + * the compute node where the File Server will be mounted. Note that all + * cluster level file servers will be mounted under $AZ_BATCHAI_MOUNT_ROOT + * location and all job level file servers will be mounted under + * $AZ_BATCHAI_JOB_MOUNT_ROOT. + * @member {string} [mountOptions] Mount options. Mount options to be passed to + * mount command. 
  */
 export interface FileServerReference {
   fileServer: ResourceId;
@@ -597,15 +651,15 @@
  * @class
  * Initializes a new instance of the UnmanagedFileSystemReference class.
  * @constructor
- * Details of the file system to mount on the compute cluster nodes.
+ * Unmanaged file system mounting configuration.
  *
- * @member {string} mountCommand Command used to mount the unmanaged file
- * system.
- * @member {string} relativeMountPath Specifies the relative path on the
- * compute cluster node where the file system will be mounted. Note that all
- * cluster level unmanaged file system will be mounted under
- * $AZ_BATCHAI_MOUNT_ROOT location and job level unmanaged file system will be
- * mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT.
+ * @member {string} mountCommand Mount command. Mount command line. Note, Batch
+ * AI will append mount path to the command on its own.
+ * @member {string} relativeMountPath Relative mount path. The relative path on
+ * the compute node where the unmanaged file system will be mounted. Note that
+ * all cluster level unmanaged file systems will be mounted under
+ * $AZ_BATCHAI_MOUNT_ROOT location and all job level unmanaged file systems
+ * will be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT.
  */
 export interface UnmanagedFileSystemReference {
   mountCommand: string;
   relativeMountPath: string;
@@ -618,15 +672,14 @@
  * @constructor
  * Details of volumes to mount on the cluster.
  *
- * @member {array} [azureFileShares] Azure File Share setup configuration.
- * References to Azure File Shares that are to be mounted to the cluster nodes.
- * @member {array} [azureBlobFileSystems] Azure Blob FileSystem setup
- * configuration. References to Azure Blob FUSE that are to be mounted to the
- * cluster nodes.
- * @member {array} [fileServers] References to a list of file servers that are
- * mounted to the cluster node.
- * @member {array} [unmanagedFileSystems] References to a list of file servers - * that are mounted to the cluster node. + * @member {array} [azureFileShares] Azure File Shares. A collection of Azure + * File Shares that are to be mounted to the cluster nodes. + * @member {array} [azureBlobFileSystems] Azure Blob file systems. A collection + * of Azure Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [fileServers] File Servers. A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [unmanagedFileSystems] Unmanaged file systems. A collection + * of unmanaged file systems that are to be mounted to the cluster nodes. */ export interface MountVolumes { azureFileShares?: AzureFileShareReference[]; @@ -639,23 +692,23 @@ export interface MountVolumes { * @class * Initializes a new instance of the AppInsightsReference class. * @constructor - * Specifies Azure Application Insights information for performance counters - * reporting. + * Azure Application Insights information for performance counters reporting. * - * @member {object} component Specifies the Azure Application Insights - * component resource id. + * @member {object} component Component ID. Azure Application Insights + * component resource ID. * @member {string} [component.id] The ID of the resource - * @member {string} [instrumentationKey] Value of the Azure Application - * Insights instrumentation key. - * @member {object} [instrumentationKeySecretReference] Specifies a KeyVault - * Secret containing Azure Application Insights instrumentation key. Specifies - * KeyVault Store and Secret which contains Azure Application Insights - * instrumentation key. One of instumentationKey or + * @member {string} [instrumentationKey] Instrumentation Key. Value of the + * Azure Application Insights instrumentation key. + * @member {object} [instrumentationKeySecretReference] Instrumentation key + * KeyVault Secret reference. 
KeyVault Store and Secret which contains Azure + * Application Insights instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. - * @member {object} [instrumentationKeySecretReference.sourceVault] + * @member {object} [instrumentationKeySecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} [instrumentationKeySecretReference.sourceVault.id] The ID * of the resource - * @member {string} [instrumentationKeySecretReference.secretUrl] + * @member {string} [instrumentationKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. */ export interface AppInsightsReference { component: ResourceId; @@ -669,24 +722,28 @@ export interface AppInsightsReference { * @constructor * Performance counters reporting settings. * - * @member {object} appInsightsReference Specifies Azure Application Insights - * information for performance counters reporting. If provided, Batch AI will - * upload node performance counters to the corresponding Azure Application - * Insights account. + * @member {object} appInsightsReference Azure Application Insights reference. + * Azure Application Insights information for performance counters reporting. + * If provided, Batch AI will upload node performance counters to the + * corresponding Azure Application Insights account. + * @member {object} [appInsightsReference.component] Azure Application Insights + * component resource ID. * @member {string} [appInsightsReference.component.id] The ID of the resource - * @member {string} [appInsightsReference.instrumentationKey] + * @member {string} [appInsightsReference.instrumentationKey] Value of the + * Azure Application Insights instrumentation key. * @member {object} [appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. 
One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} - * [appInsightsReference.instrumentationKeySecretReference.sourceVault] + * [appInsightsReference.instrumentationKeySecretReference.sourceVault] Fully + * qualified resource identifier of the Key Vault. * @member {string} * [appInsightsReference.instrumentationKeySecretReference.sourceVault.id] The * ID of the resource * @member {string} - * [appInsightsReference.instrumentationKeySecretReference.secretUrl] + * [appInsightsReference.instrumentationKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. */ export interface PerformanceCountersSettings { appInsightsReference: AppInsightsReference; @@ -696,62 +753,71 @@ export interface PerformanceCountersSettings { * @class * Initializes a new instance of the NodeSetup class. * @constructor - * Use this to prepare the VM. NOTE: The volumes specified in mountVolumes are - * mounted first and then the setupTask is run. Therefore the setup task can - * use local mountPaths in its execution. + * Node setup settings. * - * @member {object} [setupTask] Specifies a setup task which can be used to - * customize the compute nodes of the cluster. The NodeSetup task runs - * everytime a VM is rebooted. For that reason the task code needs to be - * idempotent. Generally it is used to either download static data that is - * required for all jobs that run on the cluster VMs or to download/install + * @member {object} [setupTask] Setup task. Setup task to run on cluster nodes + * when nodes got created or rebooted. The setup task code needs to be + * idempotent. Generally the setup task is used to download static data that is + * required for all jobs that run on the cluster VMs and/or to download/install * software. 
- * @member {string} [setupTask.commandLine] Command line to be executed on each - * cluster's node after it being allocated or rebooted. The command is executed - * in a bash subshell as a root. - * @member {array} [setupTask.environmentVariables] - * @member {array} [setupTask.secrets] Server will never report values of these - * variables back. + * @member {string} [setupTask.commandLine] The command line to be executed on + * each cluster's node after it being allocated or rebooted. The command is + * executed in a bash subshell as a root. + * @member {array} [setupTask.environmentVariables] A collection of user + * defined environment variables to be set for setup task. + * @member {array} [setupTask.secrets] A collection of user defined environment + * variables with secret values to be set for the setup task. Server will never + * report values of these variables back. * @member {string} [setupTask.stdOutErrPathPrefix] The prefix of a path where - * the Batch AI service will upload the stdout and stderr of the setup task. - * @member {string} [setupTask.stdOutErrPathSuffix] Batch AI creates the setup + * the Batch AI service will upload the stdout, stderr and execution log of the + * setup task. + * @member {string} [setupTask.stdOutErrPathSuffix] A path segment appended by + * Batch AI to stdOutErrPathPrefix to form a path where stdout, stderr and + * execution log of the setup task will be uploaded. Batch AI creates the setup * task output directories under an unique path to avoid conflicts between - * different clusters. You can concatinate stdOutErrPathPrefix and - * stdOutErrPathSuffix to get the full path to the output directory. - * @member {object} [mountVolumes] Information on shared volumes to be used by - * jobs. Specified mount volumes will be available to all jobs executing on the - * cluster. The volumes will be mounted at location specified by - * $AZ_BATCHAI_MOUNT_ROOT environment variable. 
- * @member {array} [mountVolumes.azureFileShares] References to Azure File + * different clusters. The full path can be obtained by concatenation of + * stdOutErrPathPrefix and stdOutErrPathSuffix. + * @member {object} [mountVolumes] Mount volumes. Mount volumes to be available + * to setup task and all jobs executing on the cluster. The volumes will be + * mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT environment + * variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure Blob - * FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {object} [performanceCountersSettings] Specifies settings for - * performance counters collecting and uploading. - * @member {object} [performanceCountersSettings.appInsightsReference] If + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. + * @member {object} [performanceCountersSettings] Performance counters + * settings. Settings for performance counters collecting and uploading. + * @member {object} [performanceCountersSettings.appInsightsReference] Azure + * Application Insights information for performance counters reporting. If * provided, Batch AI will upload node performance counters to the * corresponding Azure Application Insights account. 
* @member {object} - * [performanceCountersSettings.appInsightsReference.component] + * [performanceCountersSettings.appInsightsReference.component] Azure + * Application Insights component resource ID. * @member {string} * [performanceCountersSettings.appInsightsReference.component.id] The ID of * the resource * @member {string} - * [performanceCountersSettings.appInsightsReference.instrumentationKey] + * [performanceCountersSettings.appInsightsReference.instrumentationKey] Value + * of the Azure Application Insights instrumentation key. * @member {object} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl] + * The URL referencing a secret in the Key Vault. */ export interface NodeSetup { setupTask?: SetupTask; @@ -765,15 +831,16 @@ export interface NodeSetup { * @constructor * Counts of various compute node states on the cluster. * - * @member {number} [idleNodeCount] Number of compute nodes in idle state. - * @member {number} [runningNodeCount] Number of compute nodes which are - * running jobs. - * @member {number} [preparingNodeCount] Number of compute nodes which are - * being prepared. - * @member {number} [unusableNodeCount] Number of compute nodes which are - * unusable. 
- * @member {number} [leavingNodeCount] Number of compute nodes which are - * leaving the cluster. + * @member {number} [idleNodeCount] Idle node count. Number of compute nodes in + * idle state. + * @member {number} [runningNodeCount] Running node count. Number of compute + * nodes which are running jobs. + * @member {number} [preparingNodeCount] Preparing node count. Number of + * compute nodes which are being prepared. + * @member {number} [unusableNodeCount] Unusable node count. Number of compute + * nodes which are in unusable state. + * @member {number} [leavingNodeCount] Leaving node count. Number of compute + * nodes which are leaving the cluster. */ export interface NodeStateCounts { readonly idleNodeCount?: number; @@ -787,107 +854,143 @@ export interface NodeStateCounts { * @class * Initializes a new instance of the ClusterCreateParameters class. * @constructor - * Parameters supplied to the Create operation. + * Cluster creation operation. * - * @member {string} location The region in which to create the cluster. - * @member {object} [tags] The user specified tags associated with the Cluster. - * @member {string} vmSize The size of the virtual machines in the cluster. All - * virtual machines in a cluster are the same size. For information about + * @member {string} vmSize VM size. The size of the virtual machines in the + * cluster. All nodes in a cluster have the same VM size. For information about * available VM sizes for clusters using images from the Virtual Machines - * Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual - * Machines (Windows). Batch AI service supports all Azure VM sizes except - * STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and - * STANDARD_DSV2 series). - * @member {string} [vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority'. Default - * value: 'dedicated' . 
- * @member {object} [scaleSettings] Desired scale for the cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * Marketplace see Sizes for Virtual Machines (Linux). Batch AI service + * supports all Azure VM sizes except STANDARD_A0 and those with premium + * storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + * @member {string} [vmPriority] VM priority. VM priority. Allowed values are: + * dedicated (default) and lowpriority. Possible values include: 'dedicated', + * 'lowpriority'. Default value: 'dedicated' . + * @member {object} [scaleSettings] Scale settings. Scale settings for the + * cluster. Batch AI service supports manual and auto scale clusters. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. + * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. + * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action to + * be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] - * @member {object} [virtualMachineConfiguration] Settings for OS image and - * mounted data volumes. - * @member {object} [virtualMachineConfiguration.imageReference] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. 
+ * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. + * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is used + * only during cluster creation. Default: 0. + * @member {object} [virtualMachineConfiguration] VM configuration. OS image + * configuration for cluster nodes. All nodes in a cluster have the same OS + * image. + * @member {object} [virtualMachineConfiguration.imageReference] OS image + * reference for cluster nodes. * @member {string} [virtualMachineConfiguration.imageReference.publisher] - * @member {string} [virtualMachineConfiguration.imageReference.offer] - * @member {string} [virtualMachineConfiguration.imageReference.sku] + * Publisher of the image. + * @member {string} [virtualMachineConfiguration.imageReference.offer] Offer of + * the image. + * @member {string} [virtualMachineConfiguration.imageReference.sku] SKU of the + * image. * @member {string} [virtualMachineConfiguration.imageReference.version] + * Version of the image. * @member {string} - * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The - * virtual machine image must be in the same region and subscription as the + * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The ARM + * resource identifier of the virtual machine image for the compute nodes. This + * is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the * cluster. 
For information about the firewall settings for the Batch node * agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. - * @member {object} [nodeSetup] Setup to be done on all compute nodes in the - * cluster. - * @member {object} [nodeSetup.setupTask] - * @member {string} [nodeSetup.setupTask.commandLine] Command line to be + * @member {object} [nodeSetup] Node setup. Setup to be performed on each + * compute node in the cluster. + * @member {object} [nodeSetup.setupTask] Setup task to run on cluster nodes + * when nodes got created or rebooted. The setup task code needs to be + * idempotent. Generally the setup task is used to download static data that is + * required for all jobs that run on the cluster VMs and/or to download/install + * software. + * @member {string} [nodeSetup.setupTask.commandLine] The command line to be * executed on each cluster's node after it being allocated or rebooted. The * command is executed in a bash subshell as a root. - * @member {array} [nodeSetup.setupTask.environmentVariables] - * @member {array} [nodeSetup.setupTask.secrets] Server will never report - * values of these variables back. + * @member {array} [nodeSetup.setupTask.environmentVariables] A collection of + * user defined environment variables to be set for setup task. + * @member {array} [nodeSetup.setupTask.secrets] A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. * @member {string} [nodeSetup.setupTask.stdOutErrPathPrefix] The prefix of a - * path where the Batch AI service will upload the stdout and stderr of the - * setup task. 
- * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] Batch AI creates - * the setup task output directories under an unique path to avoid conflicts - * between different clusters. You can concatinate stdOutErrPathPrefix and - * stdOutErrPathSuffix to get the full path to the output directory. - * @member {object} [nodeSetup.mountVolumes] Specified mount volumes will be - * available to all jobs executing on the cluster. The volumes will be mounted - * at location specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * @member {array} [nodeSetup.mountVolumes.azureFileShares] References to Azure - * File Shares that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] References to - * Azure Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.fileServers] - * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] - * @member {object} [nodeSetup.performanceCountersSettings] + * path where the Batch AI service will upload the stdout, stderr and execution + * log of the setup task. + * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] A path segment + * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout, + * stderr and execution log of the setup task will be uploaded. Batch AI + * creates the setup task output directories under an unique path to avoid + * conflicts between different clusters. The full path can be obtained by + * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix. + * @member {object} [nodeSetup.mountVolumes] Mount volumes to be available to + * setup task and all jobs executing on the cluster. The volumes will be + * mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT environment + * variable. + * @member {array} [nodeSetup.mountVolumes.azureFileShares] A collection of + * Azure File Shares that are to be mounted to the cluster nodes. 
+ * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] A collection + * of Azure Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.fileServers] A collection of Batch + * AI File Servers that are to be mounted to the cluster nodes. + * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] A collection + * of unmanaged file systems that are to be mounted to the cluster nodes. + * @member {object} [nodeSetup.performanceCountersSettings] Settings for + * performance counters collecting and uploading. * @member {object} - * [nodeSetup.performanceCountersSettings.appInsightsReference] If provided, - * Batch AI will upload node performance counters to the corresponding Azure - * Application Insights account. + * [nodeSetup.performanceCountersSettings.appInsightsReference] Azure + * Application Insights information for performance counters reporting. If + * provided, Batch AI will upload node performance counters to the + * corresponding Azure Application Insights account. * @member {object} - * [nodeSetup.performanceCountersSettings.appInsightsReference.component] + * [nodeSetup.performanceCountersSettings.appInsightsReference.component] Azure + * Application Insights component resource ID. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.component.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Value of the Azure Application Insights instrumentation key. * @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. 
* @member {object} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl] - * @member {object} userAccountSettings Settings for user account that will be - * created on all compute nodes of the cluster. - * @member {string} [userAccountSettings.adminUserName] - * @member {string} [userAccountSettings.adminUserSshPublicKey] - * @member {string} [userAccountSettings.adminUserPassword] - * @member {object} [subnet] Specifies the identifier of the subnet. . + * The URL referencing a secret in the Key Vault. + * @member {object} userAccountSettings User account settings. Settings for an + * administrator user account that will be created on each compute node in the + * cluster. + * @member {string} [userAccountSettings.adminUserName] Name of the + * administrator user account which can be used to SSH to nodes. + * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public key + * of the administrator user account. + * @member {string} [userAccountSettings.adminUserPassword] Password of the + * administrator user account. + * @member {object} [subnet] Subnet. Existing virtual network subnet to put the + * cluster nodes in. Note, if a File Server mount configured in node setup, the + * File Server's subnet will be used automatically. 
* @member {string} [subnet.id] The ID of the resource */ export interface ClusterCreateParameters { - location: string; - tags?: { [propertyName: string]: string }; vmSize: string; vmPriority?: string; scaleSettings?: ScaleSettings; @@ -901,24 +1004,31 @@ export interface ClusterCreateParameters { * @class * Initializes a new instance of the ClusterUpdateParameters class. * @constructor - * Parameters supplied to the Update operation. + * Cluster update parameters. * - * @member {object} [tags] The user specified tags associated with the Cluster. - * @member {object} [scaleSettings] Desired scale for the cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * @member {object} [scaleSettings] Scale settings. Desired scale settings for + * the cluster. Batch AI service supports manual and auto scale clusters. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. + * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. + * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action to + * be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. 
+ * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. + * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is used + * only during cluster creation. Default: 0. */ export interface ClusterUpdateParameters { - tags?: { [propertyName: string]: string }; scaleSettings?: ScaleSettings; } @@ -926,10 +1036,10 @@ export interface ClusterUpdateParameters { * @class * Initializes a new instance of the NameValuePair class. * @constructor - * Represents a name-value pair. + * Name-value pair. * - * @member {string} [name] The name in the name-value pair. - * @member {string} [value] The value in the name-value pair. + * @member {string} [name] Name. The name in the name-value pair. + * @member {string} [value] Value. The value in the name-value pair. */ export interface NameValuePair { name?: string; @@ -942,7 +1052,7 @@ export interface NameValuePair { * @constructor * An error response from the Batch AI service. * - * @member {string} [code] An identifier for the error. Codes are invariant and + * @member {string} [code] An identifier of the error. Codes are invariant and * are intended to be consumed programmatically. * @member {string} [message] A message describing the error, intended to be * suitable for display in a user interface. @@ -958,136 +1068,174 @@ export interface BatchAIError { * @class * Initializes a new instance of the Cluster class. * @constructor - * Contains information about a Cluster. + * Information about a Cluster. 
* - * @member {string} [vmSize] The size of the virtual machines in the cluster. - * All virtual machines in a cluster are the same size. For information about - * available VM sizes for clusters using images from the Virtual Machines - * Marketplace (see Sizes for Virtual Machines (Linux) or Sizes for Virtual - * Machines (Windows). Batch AI service supports all Azure VM sizes except - * STANDARD_A0 and those with premium storage (STANDARD_GS, STANDARD_DS, and - * STANDARD_DSV2 series). - * @member {string} [vmPriority] dedicated or lowpriority. The default value is - * dedicated. The node can get preempted while the task is running if - * lowpriority is choosen. This is best suited if the workload is checkpointing - * and can be restarted. Possible values include: 'dedicated', 'lowpriority'. - * Default value: 'dedicated' . - * @member {object} [scaleSettings] Desired scale for the Cluster. - * @member {object} [scaleSettings.manual] - * @member {number} [scaleSettings.manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [scaleSettings.manual.nodeDeallocationOption] The default - * value is requeue. Possible values include: 'requeue', 'terminate', + * @member {string} [vmSize] VM size. The size of the virtual machines in the + * cluster. All nodes in a cluster have the same VM size. + * @member {string} [vmPriority] VM priority. VM priority of cluster nodes. + * Possible values include: 'dedicated', 'lowpriority'. Default value: + * 'dedicated' . + * @member {object} [scaleSettings] Scale settings. Scale settings of the + * cluster. + * @member {object} [scaleSettings.manual] Manual scale settings for the + * cluster. + * @member {number} [scaleSettings.manual.targetNodeCount] The desired number + * of compute nodes in the Cluster. Default is 0. 
+ * @member {string} [scaleSettings.manual.nodeDeallocationOption] An action to + * be performed when the cluster size is decreasing. The default value is + * requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion' - * @member {object} [scaleSettings.autoScale] - * @member {number} [scaleSettings.autoScale.minimumNodeCount] - * @member {number} [scaleSettings.autoScale.maximumNodeCount] - * @member {number} [scaleSettings.autoScale.initialNodeCount] - * @member {object} [virtualMachineConfiguration] Settings for OS image and - * mounted data volumes. - * @member {object} [virtualMachineConfiguration.imageReference] + * @member {object} [scaleSettings.autoScale] Auto-scale settings for the + * cluster. + * @member {number} [scaleSettings.autoScale.minimumNodeCount] The minimum + * number of compute nodes the Batch AI service will try to allocate for the + * cluster. Note, the actual number of nodes can be less than the specified + * value if the subscription has not enough quota to fulfill the request. + * @member {number} [scaleSettings.autoScale.maximumNodeCount] The maximum + * number of compute nodes the cluster can have. + * @member {number} [scaleSettings.autoScale.initialNodeCount] The number of + * compute nodes to allocate on cluster creation. Note that this value is used + * only during cluster creation. Default: 0. + * @member {object} [virtualMachineConfiguration] VM configuration. Virtual + * machine configuration (OS image) of the compute nodes. All nodes in a + * cluster have the same OS image configuration. + * @member {object} [virtualMachineConfiguration.imageReference] OS image + * reference for cluster nodes. * @member {string} [virtualMachineConfiguration.imageReference.publisher] - * @member {string} [virtualMachineConfiguration.imageReference.offer] - * @member {string} [virtualMachineConfiguration.imageReference.sku] + * Publisher of the image. 
+ * @member {string} [virtualMachineConfiguration.imageReference.offer] Offer of + * the image. + * @member {string} [virtualMachineConfiguration.imageReference.sku] SKU of the + * image. * @member {string} [virtualMachineConfiguration.imageReference.version] + * Version of the image. * @member {string} - * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The - * virtual machine image must be in the same region and subscription as the + * [virtualMachineConfiguration.imageReference.virtualMachineImageId] The ARM + * resource identifier of the virtual machine image for the compute nodes. This + * is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node * agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. - * @member {object} [nodeSetup] Setup to be done on all compute nodes in the - * Cluster. - * @member {object} [nodeSetup.setupTask] - * @member {string} [nodeSetup.setupTask.commandLine] Command line to be + * @member {object} [nodeSetup] Node setup. Setup (mount file systems, + * performance counters settings and custom setup task) to be performed on each + * compute node in the cluster. + * @member {object} [nodeSetup.setupTask] Setup task to run on cluster nodes + * when nodes got created or rebooted. The setup task code needs to be + * idempotent. Generally the setup task is used to download static data that is + * required for all jobs that run on the cluster VMs and/or to download/install + * software. 
+ * @member {string} [nodeSetup.setupTask.commandLine] The command line to be * executed on each cluster's node after it being allocated or rebooted. The * command is executed in a bash subshell as a root. - * @member {array} [nodeSetup.setupTask.environmentVariables] - * @member {array} [nodeSetup.setupTask.secrets] Server will never report - * values of these variables back. + * @member {array} [nodeSetup.setupTask.environmentVariables] A collection of + * user defined environment variables to be set for setup task. + * @member {array} [nodeSetup.setupTask.secrets] A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. * @member {string} [nodeSetup.setupTask.stdOutErrPathPrefix] The prefix of a - * path where the Batch AI service will upload the stdout and stderr of the - * setup task. - * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] Batch AI creates - * the setup task output directories under an unique path to avoid conflicts - * between different clusters. You can concatinate stdOutErrPathPrefix and - * stdOutErrPathSuffix to get the full path to the output directory. - * @member {object} [nodeSetup.mountVolumes] Specified mount volumes will be - * available to all jobs executing on the cluster. The volumes will be mounted - * at location specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * @member {array} [nodeSetup.mountVolumes.azureFileShares] References to Azure - * File Shares that are to be mounted to the cluster nodes. - * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] References to - * Azure Blob FUSE that are to be mounted to the cluster nodes. 
- * @member {array} [nodeSetup.mountVolumes.fileServers]
- * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems]
- * @member {object} [nodeSetup.performanceCountersSettings]
+ * path where the Batch AI service will upload the stdout, stderr and execution
+ * log of the setup task.
+ * @member {string} [nodeSetup.setupTask.stdOutErrPathSuffix] A path segment
+ * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout,
+ * stderr and execution log of the setup task will be uploaded. Batch AI
+ * creates the setup task output directories under a unique path to avoid
+ * conflicts between different clusters. The full path can be obtained by
+ * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix.
+ * @member {object} [nodeSetup.mountVolumes] Mount volumes to be available to
+ * setup task and all jobs executing on the cluster. The volumes will be
+ * mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT environment
+ * variable.
+ * @member {array} [nodeSetup.mountVolumes.azureFileShares] A collection of
+ * Azure File Shares that are to be mounted to the cluster nodes.
+ * @member {array} [nodeSetup.mountVolumes.azureBlobFileSystems] A collection
+ * of Azure Blob Containers that are to be mounted to the cluster nodes.
+ * @member {array} [nodeSetup.mountVolumes.fileServers] A collection of Batch
+ * AI File Servers that are to be mounted to the cluster nodes.
+ * @member {array} [nodeSetup.mountVolumes.unmanagedFileSystems] A collection
+ * of unmanaged file systems that are to be mounted to the cluster nodes.
+ * @member {object} [nodeSetup.performanceCountersSettings] Settings for
+ * performance counters collecting and uploading.
 * @member {object}
- * [nodeSetup.performanceCountersSettings.appInsightsReference] If provided,
- * Batch AI will upload node performance counters to the corresponding Azure
- * Application Insights account.
+ * [nodeSetup.performanceCountersSettings.appInsightsReference] Azure
+ * Application Insights information for performance counters reporting. If
+ * provided, Batch AI will upload node performance counters to the
+ * corresponding Azure Application Insights account.
 * @member {object}
- * [nodeSetup.performanceCountersSettings.appInsightsReference.component]
+ * [nodeSetup.performanceCountersSettings.appInsightsReference.component] Azure
+ * Application Insights component resource ID.
 * @member {string}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.component.id]
 * The ID of the resource
 * @member {string}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey]
+ * Value of the Azure Application Insights instrumentation key.
 * @member {object}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference]
- * Specifies KeyVault Store and Secret which contains Azure Application
- * Insights instrumentation key. One of instumentationKey or
+ * KeyVault Store and Secret which contains Azure Application Insights
+ * instrumentation key. One of instrumentationKey or
 * instrumentationKeySecretReference must be specified.
 * @member {object}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault]
+ * Fully qualified resource identifier of the Key Vault.
 * @member {string}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id]
 * The ID of the resource
 * @member {string}
 * [nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl]
- * @member {object} [userAccountSettings] Settings for user account of compute
- * nodes.
- * @member {string} [userAccountSettings.adminUserName]
- * @member {string} [userAccountSettings.adminUserSshPublicKey]
- * @member {string} [userAccountSettings.adminUserPassword]
- * @member {object} [subnet] Specifies the identifier of the subnet.
+ * The URL referencing a secret in the Key Vault.
+ * @member {object} [userAccountSettings] User account settings. Administrator
+ * user account settings which can be used to SSH to compute nodes.
+ * @member {string} [userAccountSettings.adminUserName] Name of the
+ * administrator user account which can be used to SSH to nodes.
+ * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public key
+ * of the administrator user account.
+ * @member {string} [userAccountSettings.adminUserPassword] Password of the
+ * administrator user account.
+ * @member {object} [subnet] Subnet. Virtual network subnet resource ID the
+ * cluster nodes belong to.
 * @member {string} [subnet.id] The ID of the resource
- * @member {date} [creationTime] The creation time of the cluster.
- * @member {string} [provisioningState] Specifies the provisioning state of the
- * cluster. Possible value are: creating - Specifies that the cluster is being
- * created. succeeded - Specifies that the cluster has been created
+ * @member {date} [creationTime] Creation time. The time when the cluster was
+ * created.
+ * @member {string} [provisioningState] Provisioning state. Provisioning state
+ * of the cluster. Possible values are: creating - Specifies that the cluster is
+ * being created. succeeded - Specifies that the cluster has been created
 * successfully. failed - Specifies that the cluster creation has failed.
 * deleting - Specifies that the cluster is being deleted. Possible values
 * include: 'creating', 'succeeded', 'failed', 'deleting'
- * @member {date} [provisioningStateTransitionTime] The provisioning state
- * transition time of the cluster.
- * @member {string} [allocationState] Indicates whether the cluster is - * resizing. Possible values are: steady and resizing. steady state indicates - * that the cluster is not resizing. There are no changes to the number of - * compute nodes in the cluster in progress. A cluster enters this state when - * it is created and when no operations are being performed on the cluster to - * change the number of compute nodes. resizing state indicates that the - * cluster is resizing; that is, compute nodes are being added to or removed - * from the cluster. Possible values include: 'steady', 'resizing' - * @member {date} [allocationStateTransitionTime] The time at which the cluster - * entered its current allocation state. - * @member {array} [errors] Contains details of various errors on the cluster - * including resize and node setup task. This element contains all the errors - * encountered by various compute nodes during node setup. - * @member {number} [currentNodeCount] The number of compute nodes currently - * assigned to the cluster. - * @member {object} [nodeStateCounts] Counts of various node states on the - * cluster. - * @member {number} [nodeStateCounts.idleNodeCount] - * @member {number} [nodeStateCounts.runningNodeCount] - * @member {number} [nodeStateCounts.preparingNodeCount] - * @member {number} [nodeStateCounts.unusableNodeCount] - * @member {number} [nodeStateCounts.leavingNodeCount] - */ -export interface Cluster extends Resource { + * @member {date} [provisioningStateTransitionTime] Provisioning State + * Transition time. Time when the provisioning state was changed. + * @member {string} [allocationState] Allocation state. Allocation state of the + * cluster. Possible values are: steady - Indicates that the cluster is not + * resizing. There are no changes to the number of compute nodes in the cluster + * in progress. 
A cluster enters this state when it is created and when no + * operations are being performed on the cluster to change the number of + * compute nodes. resizing - Indicates that the cluster is resizing; that is, + * compute nodes are being added to or removed from the cluster. Possible + * values include: 'steady', 'resizing' + * @member {date} [allocationStateTransitionTime] Allocation state transition + * time. The time at which the cluster entered its current allocation state. + * @member {array} [errors] Errors. Collection of errors encountered by various + * compute nodes during node setup. + * @member {number} [currentNodeCount] Current node count. The number of + * compute nodes currently assigned to the cluster. + * @member {object} [nodeStateCounts] Node state counts. Counts of various node + * states on the cluster. + * @member {number} [nodeStateCounts.idleNodeCount] Number of compute nodes in + * idle state. + * @member {number} [nodeStateCounts.runningNodeCount] Number of compute nodes + * which are running jobs. + * @member {number} [nodeStateCounts.preparingNodeCount] Number of compute + * nodes which are being prepared. + * @member {number} [nodeStateCounts.unusableNodeCount] Number of compute nodes + * which are in unusable state. + * @member {number} [nodeStateCounts.leavingNodeCount] Number of compute nodes + * which are leaving the cluster. + */ +export interface Cluster extends ProxyResource { vmSize?: string; vmPriority?: string; scaleSettings?: ScaleSettings; @@ -1111,17 +1259,19 @@ export interface Cluster extends Resource { * @constructor * Credentials to access a container image in a private repository. * - * @member {string} username User name to login. - * @member {string} [password] Password to login. One of password or - * passwordSecretReference must be specified. - * @member {object} [passwordSecretReference] Specifies the location of the - * password, which is a Key Vault Secret. 
Users can store their secrets in
- * Azure KeyVault and pass it to the Batch AI Service to integrate with
- * KeyVault. One of password or passwordSecretReference must be specified.
- * @member {object} [passwordSecretReference.sourceVault]
+ * @member {string} username User name. User name to login to the repository.
+ * @member {string} [password] Password. User password to login to the docker
+ * repository. One of password or passwordSecretReference must be specified.
+ * @member {object} [passwordSecretReference] Password secret reference.
+ * KeyVault Secret storing the password. Users can store their secrets in Azure
+ * KeyVault and pass it to the Batch AI service to integrate with KeyVault. One
+ * of password or passwordSecretReference must be specified.
+ * @member {object} [passwordSecretReference.sourceVault] Fully qualified
+ * resource identifier of the Key Vault.
 * @member {string} [passwordSecretReference.sourceVault.id] The ID of the
 * resource
- * @member {string} [passwordSecretReference.secretUrl]
+ * @member {string} [passwordSecretReference.secretUrl] The URL referencing a
+ * secret in the Key Vault.
 */
export interface PrivateRegistryCredentials {
  username: string;
@@ -1133,22 +1283,26 @@ export interface PrivateRegistryCredentials {
 * @class
 * Initializes a new instance of the ImageSourceRegistry class.
 * @constructor
- * Details of the container image such as name, URL and credentials.
+ * Information about docker image for the job.
 *
- * @member {string} [serverUrl] URL for image repository.
- * @member {string} image The name of the image in image repository.
- * @member {object} [credentials] Information to access the private Docker
+ * @member {string} [serverUrl] Server URL. URL for image repository.
+ * @member {string} image Image. The name of the image in the image repository.
+ * @member {object} [credentials] Credentials. Credentials to access the
+ * private docker repository.
+ * @member {string} [credentials.username] User name to login to the
 * repository.
- * @member {string} [credentials.username]
- * @member {string} [credentials.password] One of password or
- * passwordSecretReference must be specified.
- * @member {object} [credentials.passwordSecretReference] Users can store their
- * secrets in Azure KeyVault and pass it to the Batch AI Service to integrate
- * with KeyVault. One of password or passwordSecretReference must be specified.
- * @member {object} [credentials.passwordSecretReference.sourceVault]
+ * @member {string} [credentials.password] User password to login to the docker
+ * repository. One of password or passwordSecretReference must be specified.
+ * @member {object} [credentials.passwordSecretReference] KeyVault Secret
+ * storing the password. Users can store their secrets in Azure KeyVault and
+ * pass it to the Batch AI service to integrate with KeyVault. One of password
+ * or passwordSecretReference must be specified.
+ * @member {object} [credentials.passwordSecretReference.sourceVault] Fully
+ * qualified resource identifier of the Key Vault.
 * @member {string} [credentials.passwordSecretReference.sourceVault.id] The ID
 * of the resource
- * @member {string} [credentials.passwordSecretReference.secretUrl]
+ * @member {string} [credentials.passwordSecretReference.secretUrl] The URL
+ * referencing a secret in the Key Vault.
 */
export interface ImageSourceRegistry {
  serverUrl?: string;
@@ -1160,53 +1314,64 @@ export interface ImageSourceRegistry {
 * @class
 * Initializes a new instance of the ContainerSettings class.
 * @constructor
- * Settings for the container to be downloaded.
- *
- * @member {object} imageSourceRegistry Registry to download the container
- * from.
- * @member {string} [imageSourceRegistry.serverUrl]
- * @member {string} [imageSourceRegistry.image]
- * @member {object} [imageSourceRegistry.credentials]
- * @member {string} [imageSourceRegistry.credentials.username]
- * @member {string} [imageSourceRegistry.credentials.password] One of password
- * or passwordSecretReference must be specified.
+ * Docker container settings.
+ *
+ * @member {object} imageSourceRegistry Image source registry. Information
+ * about docker image and docker registry to download the container from.
+ * @member {string} [imageSourceRegistry.serverUrl] URL for image repository.
+ * @member {string} [imageSourceRegistry.image] The name of the image in the
+ * image repository.
+ * @member {object} [imageSourceRegistry.credentials] Credentials to access the
+ * private docker repository.
+ * @member {string} [imageSourceRegistry.credentials.username] User name to
+ * login to the repository.
+ * @member {string} [imageSourceRegistry.credentials.password] User password to
+ * login to the docker repository. One of password or passwordSecretReference
+ * must be specified.
 * @member {object} [imageSourceRegistry.credentials.passwordSecretReference]
- * Users can store their secrets in Azure KeyVault and pass it to the Batch AI
- * Service to integrate with KeyVault. One of password or
- * passwordSecretReference must be specified.
+ * KeyVault Secret storing the password. Users can store their secrets in Azure
+ * KeyVault and pass it to the Batch AI service to integrate with KeyVault. One
+ * of password or passwordSecretReference must be specified.
 * @member {object}
- * [imageSourceRegistry.credentials.passwordSecretReference.sourceVault]
+ * [imageSourceRegistry.credentials.passwordSecretReference.sourceVault] Fully
+ * qualified resource identifier of the Key Vault.
* @member {string} * [imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] The * ID of the resource * @member {string} - * [imageSourceRegistry.credentials.passwordSecretReference.secretUrl] + * [imageSourceRegistry.credentials.passwordSecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. + * @member {string} [shmSize] /dev/shm size. Size of /dev/shm. Please refer to + * docker documentation for supported argument formats. */ export interface ContainerSettings { imageSourceRegistry: ImageSourceRegistry; + shmSize?: string; } /** * @class * Initializes a new instance of the CNTKsettings class. * @constructor - * Specifies the settings for CNTK (aka Microsoft Cognitive Toolkit) job. + * CNTK (aka Microsoft Cognitive Toolkit) job settings. * - * @member {string} [languageType] Specifies the language type to use for + * @member {string} [languageType] Language type. The language to use for * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are * 'BrainScript' or 'Python'. - * @member {string} [configFilePath] Specifies the path of the config file. - * This property can be specified only if the languageType is 'BrainScript'. - * @member {string} [pythonScriptFilePath] The path and file name of the python - * script to execute the job. This property can be specified only if the + * @member {string} [configFilePath] Config file path. Specifies the path of + * the BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. + * @member {string} [pythonScriptFilePath] Python script file path. Python + * script to execute. This property can be specified only if the languageType + * is 'Python'. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. This property can be specified only if the * languageType is 'Python'. - * @member {string} [pythonInterpreterPath] The path to python interpreter. 
- * This property can be specified only if the languageType is 'Python'. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script or CNTK.exe. - * @member {number} [processCount] Number of processes parameter that is passed - * to MPI runtime. The default value for this property is equal to nodeCount - * property + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script or cntk executable. + * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property */ export interface CNTKsettings { languageType?: string; @@ -1221,19 +1386,20 @@ export interface CNTKsettings { * @class * Initializes a new instance of the PyTorchSettings class. * @constructor - * Specifies the settings for pyTorch job. - * - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Specifies the command line arguments for - * the master task. - * @member {number} [processCount] Number of processes to launch for the job - * execution. The default value for this property is equal to nodeCount - * property. - * @member {string} [communicationBackend] Type of the communication backend - * for distributed jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required - * for non-distributed jobs. + * pyTorch job settings. + * + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. 
+ * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property + * @member {string} [communicationBackend] Communication backend. Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. */ export interface PyTorchSettings { pythonScriptFilePath: string; @@ -1247,28 +1413,30 @@ export interface PyTorchSettings { * @class * Initializes a new instance of the TensorFlowSettings class. * @constructor - * Specifies the settings for TensorFlow job. - * - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [masterCommandLineArgs] Specifies the command line - * arguments for the master task. - * @member {string} [workerCommandLineArgs] Specifies the command line - * arguments for the worker task. This property is optional for single machine - * training. - * @member {string} [parameterServerCommandLineArgs] Specifies the command line - * arguments for the parameter server task. This property is optional for - * single machine training. - * @member {number} [workerCount] The number of worker tasks. If specified, the - * value must be less than or equal to (nodeCount * numberOfGPUs per VM). If - * not specified, the default value is equal to nodeCount. This property can be - * specified only for distributed TensorFlow training - * @member {number} [parameterServerCount] The number of parmeter server tasks. - * If specified, the value must be less than or equal to nodeCount. If not - * specified, the default value is equal to 1 for distributed TensorFlow - * training (This property is not applicable for single machine training). This + * TensorFlow job settings. 
+ * + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [masterCommandLineArgs] Master command line arguments. + * Command line arguments that need to be passed to the python script for the + * master task. + * @member {string} [workerCommandLineArgs] Worker command line arguments. + * Command line arguments that need to be passed to the python script for the + * worker task. Optional for single process jobs. + * @member {string} [parameterServerCommandLineArgs] Parameter server command + * line arguments. Command line arguments that need to be passed to the python + * script for the parameter server. Optional for single process jobs. + * @member {number} [workerCount] Worker count. The number of worker tasks. If + * specified, the value must be less than or equal to (nodeCount * numberOfGPUs + * per VM). If not specified, the default value is equal to nodeCount. This * property can be specified only for distributed TensorFlow training. + * @member {number} [parameterServerCount] Parameter server count. The number + * of parameter server tasks. If specified, the value must be less than or + * equal to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. */ export interface TensorFlowSettings { pythonScriptFilePath: string; @@ -1284,21 +1452,22 @@ export interface TensorFlowSettings { * @class * Initializes a new instance of the CaffeSettings class. * @constructor - * Specifies the settings for Caffe job. + * Caffe job settings. * - * @member {string} [configFilePath] Specifies the path of the config file. - * This property cannot be specified if pythonScriptFilePath is specified. 
- * @member {string} [pythonScriptFilePath] The path and file name of the python - * script to execute the job. This property cannot be specified if - * configFilePath is specified. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * This property can be specified only if the pythonScriptFilePath is + * @member {string} [configFilePath] Config file path. Path of the config file + * for the job. This property cannot be specified if pythonScriptFilePath is * specified. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the Caffe job. - * @member {number} [processCount] Number of processes parameter that is passed - * to MPI runtime. The default value for this property is equal to nodeCount - * property + * @member {string} [pythonScriptFilePath] Python script file path. Python + * script to execute. This property cannot be specified if configFilePath is + * specified. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the Caffe job. + * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property */ export interface CaffeSettings { configFilePath?: string; @@ -1312,13 +1481,14 @@ export interface CaffeSettings { * @class * Initializes a new instance of the Caffe2Settings class. * @constructor - * Specifies the settings for Caffe2 job. + * Caffe2 job settings. * - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. 
- * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. */ export interface Caffe2Settings { pythonScriptFilePath: string; @@ -1330,16 +1500,17 @@ export interface Caffe2Settings { * @class * Initializes a new instance of the ChainerSettings class. * @constructor - * Specifies the settings for Chainer job. + * Chainer job settings. * - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. - * @member {number} [processCount] Number of processes parameter that is passed - * to MPI runtime. The default value for this property is equal to nodeCount - * property + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. + * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property */ export interface ChainerSettings { pythonScriptFilePath: string; @@ -1352,10 +1523,10 @@ export interface ChainerSettings { * @class * Initializes a new instance of the CustomToolkitSettings class. 
* @constructor - * Specifies the settings for a custom tool kit job. + * Custom tool kit job settings. * - * @member {string} [commandLine] The command line to execute the custom - * toolkit Job. + * @member {string} [commandLine] Command line. The command line to execute on + * the master node. */ export interface CustomToolkitSettings { commandLine?: string; @@ -1365,13 +1536,13 @@ export interface CustomToolkitSettings { * @class * Initializes a new instance of the CustomMpiSettings class. * @constructor - * Specifies the settings for a custom tool kit job. + * Custom MPI job settings. * - * @member {string} commandLine The program and program command line parameters - * to be executed by mpi runtime. - * @member {number} [processCount] Number of processes parameter that is passed - * to MPI runtime. The default value for this property is equal to nodeCount - * property + * @member {string} commandLine Command line. The command line to be executed + * by mpi runtime on each compute node. + * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property */ export interface CustomMpiSettings { commandLine: string; @@ -1382,16 +1553,17 @@ export interface CustomMpiSettings { * @class * Initializes a new instance of the HorovodSettings class. * @constructor - * Specifies the settings for Chainer job. + * Specifies the settings for Horovod job. * - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Command line arguments that needs to be - * passed to the python script. - * @member {number} [processCount] Number of processes parameter that is passed - * to MPI runtime. 
The default value for this property is equal to nodeCount - * property + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. + * @member {number} [processCount] Process count. Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property */ export interface HorovodSettings { pythonScriptFilePath: string; @@ -1404,9 +1576,9 @@ export interface HorovodSettings { * @class * Initializes a new instance of the JobPreparation class. * @constructor - * Specifies the settings for job preparation. + * Job preparation settings. * - * @member {string} commandLine The command line to execute. If + * @member {string} commandLine Command line. The command line to execute. If * containerSettings is specified on the job, this commandLine will be executed * in the same container as job. Otherwise it will be executed on the node. */ @@ -1420,10 +1592,10 @@ export interface JobPreparation { * @constructor * Input directory for the job. * - * @member {string} id The id for the input directory. The path of the input - * directory will be available as a value of an environment variable with - * AZ_BATCHAI_INPUT_ name, where is the value of id attribute. - * @member {string} path The path to the input directory. + * @member {string} id ID. The ID for the input directory. The job can use + * AZ_BATCHAI_INPUT_ environment variable to find the directory path, where + * is the value of id attribute. + * @member {string} path Path. The path to the input directory. */ export interface InputDirectory { id: string; @@ -1436,19 +1608,18 @@ export interface InputDirectory { * @constructor * Output directory for the job. 
 * - * @member {string} id The name for the output directory. The path of the - * output directory will be available as a value of an environment variable - * with AZ_BATCHAI_OUTPUT_ name, where is the value of id attribute. - * @member {string} pathPrefix The prefix path where the output directory will - * be created. NOTE: This is an absolute path to prefix. E.g. - * $AZ_BATCHAI_MOUNT_ROOT/MyNFS/MyLogs. You can find the full path to the + * @member {string} id ID. The ID of the output directory. The job can use + * AZ_BATCHAI_OUTPUT_ environment variable to find the directory path, where + * is the value of id attribute. + * @member {string} pathPrefix Path prefix. The prefix path where the output + * directory will be created. Note, this is an absolute path to prefix. E.g. + * $AZ_BATCHAI_MOUNT_ROOT/MyNFS/MyLogs. The full path to the output directory + * can be found by combining pathPrefix, jobOutputDirectoryPathSegment + * (reported by get job) and pathSuffix. + * @member {string} [pathSuffix] Path suffix. The suffix path where the output + * directory will be created. E.g. models. You can find the full path to the * output directory by combining pathPrefix, jobOutputDirectoryPathSegment * (reported by get job) and pathSuffix. - * @member {string} [pathSuffix] The suffix path where the output directory - * will be created. The suffix path where the output directory will be created. - * E.g. models. You can find the full path to the output directory by combining - * pathPrefix, jobOutputDirectoryPathSegment (reported by get job) and - * pathSuffix. */ export interface OutputDirectory { id: string; @@ -1462,8 +1633,9 @@ export interface OutputDirectory { * @constructor * Constraints associated with the Job. * - * @member {moment.duration} [maxWallClockTime] Max time the job can run. - * Default Value = 1 week. Default value: moment.duration('7.00:00:00') . + * @member {moment.duration} [maxWallClockTime] Max wall clock time. Max time + * the job can run. Default value: 1 week. 
Default value: + * moment.duration('7.00:00:00') . */ export interface JobBasePropertiesConstraints { maxWallClockTime?: moment.Duration; } /** * @class * Initializes a new instance of the JobCreateParameters class. * @constructor - * Parameters supplied to the Create operation. + * Job creation parameters. * - * @member {string} [schedulingPriority] Scheduling priority associated with - * the job. Scheduling priority associated with the job. Possible values - * include: 'low', 'normal', 'high'. Default value: 'normal' . - * @member {object} cluster Specifies the Id of the cluster on which this job - * will run. + * @member {string} [schedulingPriority] Scheduling priority. Scheduling + * priority associated with the job. Possible values: low, normal, high. + * Possible values include: 'low', 'normal', 'high'. Default value: 'normal' . + * @member {object} cluster Cluster. Resource ID of the cluster on which this + * job will run. * @member {string} [cluster.id] The ID of the resource - * @member {object} [mountVolumes] Information on mount volumes to be used by - * the job. These volumes will be mounted before the job execution and will be - * unmouted after the job completion. The volumes will be mounted at location - * specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * @member {array} [mountVolumes.azureFileShares] References to Azure File + * @member {object} [mountVolumes] Mount volumes. Information on mount volumes + * to be used by the job. These volumes will be mounted before the job + * execution and will be unmounted after the job completion. The volumes will be + * mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment + * variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. 
- * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure Blob - * FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {number} nodeCount Number of compute nodes to run the job on. The - * job will be gang scheduled on that many compute nodes - * @member {object} [containerSettings] If provided the job will run in the - * specified container. If the container was downloaded as part of cluster - * setup then the same container image will be used. If not provided, the job - * will run on the VM. - * @member {object} [containerSettings.imageSourceRegistry] - * @member {string} [containerSettings.imageSourceRegistry.serverUrl] - * @member {string} [containerSettings.imageSourceRegistry.image] + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. + * @member {number} nodeCount Node count. Number of compute nodes to run the + * job on. The job will be gang scheduled on that many compute nodes. + * @member {object} [containerSettings] Container settings. Docker container + * settings for the job. If not provided, the job will run directly on the + * node. + * @member {object} [containerSettings.imageSourceRegistry] Information about + * docker image and docker registry to download the container from. + * @member {string} [containerSettings.imageSourceRegistry.serverUrl] URL for + * image repository. + * @member {string} [containerSettings.imageSourceRegistry.image] The name of + * the image in the image repository. 
 * @member {object} [containerSettings.imageSourceRegistry.credentials] + * Credentials to access the private docker repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.username] + * [containerSettings.imageSourceRegistry.credentials.username] User name to + * login to the repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.password] One of password - * or passwordSecretReference must be specified. + * [containerSettings.imageSourceRegistry.credentials.password] User password + * to login to the docker repository. One of password or + * passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Users can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * KeyVault Secret storing the password. Users can store their secrets in Azure + * KeyVault and pass it to the Batch AI service to integrate with KeyVault. One + * of password or passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] * The ID of the resource * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl] - * @member {object} [cntkSettings] Specifies the settings for CNTK (aka + * The URL referencing a secret in the Key Vault. + * @member {string} [containerSettings.shmSize] Size of /dev/shm. Please refer + * to docker documentation for supported argument formats. + * @member {object} [cntkSettings] CNTK settings. Settings for CNTK (aka * Microsoft Cognitive Toolkit) job. 
- * @member {string} [cntkSettings.languageType] Valid values are 'BrainScript' - * or 'Python'. - * @member {string} [cntkSettings.configFilePath] This property can be - * specified only if the languageType is 'BrainScript'. - * @member {string} [cntkSettings.pythonScriptFilePath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.pythonInterpreterPath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.commandLineArgs] - * @member {number} [cntkSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [pyTorchSettings] Specifies the settings for pyTorch job. - * @member {string} [pyTorchSettings.pythonScriptFilePath] - * @member {string} [pyTorchSettings.pythonInterpreterPath] - * @member {string} [pyTorchSettings.commandLineArgs] - * @member {number} [pyTorchSettings.processCount] The default value for this - * property is equal to nodeCount property. - * @member {string} [pyTorchSettings.communicationBackend] Valid values are - * 'TCP', 'Gloo' or 'MPI'. Not required for non-distributed jobs. - * @member {object} [tensorFlowSettings] Specifies the settings for Tensor Flow + * @member {string} [cntkSettings.languageType] The language to use for + * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are + * 'BrainScript' or 'Python'. + * @member {string} [cntkSettings.configFilePath] Specifies the path of the + * BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. + * @member {string} [cntkSettings.pythonScriptFilePath] Python script to + * execute. This property can be specified only if the languageType is + * 'Python'. + * @member {string} [cntkSettings.pythonInterpreterPath] The path to the Python + * interpreter. This property can be specified only if the languageType is + * 'Python'. 
+ * @member {string} [cntkSettings.commandLineArgs] Command line arguments that + * need to be passed to the python script or cntk executable. + * @member {number} [cntkSettings.processCount] Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property + * @member {object} [pyTorchSettings] pyTorch settings. Settings for pyTorch * job. - * @member {string} [tensorFlowSettings.pythonScriptFilePath] - * @member {string} [tensorFlowSettings.pythonInterpreterPath] - * @member {string} [tensorFlowSettings.masterCommandLineArgs] - * @member {string} [tensorFlowSettings.workerCommandLineArgs] This property is - * optional for single machine training. - * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] This - * property is optional for single machine training. - * @member {number} [tensorFlowSettings.workerCount] If specified, the value - * must be less than or equal to (nodeCount * numberOfGPUs per VM). If not - * specified, the default value is equal to nodeCount. This property can be - * specified only for distributed TensorFlow training - * @member {number} [tensorFlowSettings.parameterServerCount] If specified, the - * value must be less than or equal to nodeCount. If not specified, the default - * value is equal to 1 for distributed TensorFlow training (This property is - * not applicable for single machine training). This property can be specified - * only for distributed TensorFlow training. - * @member {object} [caffeSettings] Specifies the settings for Caffe job. - * @member {string} [caffeSettings.configFilePath] This property cannot be - * specified if pythonScriptFilePath is specified. - * @member {string} [caffeSettings.pythonScriptFilePath] This property cannot - * be specified if configFilePath is specified. - * @member {string} [caffeSettings.pythonInterpreterPath] This property can be - * specified only if the pythonScriptFilePath is specified. 
- * @member {string} [caffeSettings.commandLineArgs] - * @member {number} [caffeSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [caffe2Settings] Specifies the settings for Caffe2 job. - * @member {string} [caffe2Settings.pythonScriptFilePath] - * @member {string} [caffe2Settings.pythonInterpreterPath] - * @member {string} [caffe2Settings.commandLineArgs] - * @member {object} [chainerSettings] Specifies the settings for Chainer job. - * @member {string} [chainerSettings.pythonScriptFilePath] - * @member {string} [chainerSettings.pythonInterpreterPath] - * @member {string} [chainerSettings.commandLineArgs] - * @member {number} [chainerSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [customToolkitSettings] Specifies the settings for custom - * tool kit job. - * @member {string} [customToolkitSettings.commandLine] - * @member {object} [customMpiSettings] Specifies the settings for custom MPI + * @member {string} [pyTorchSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [pyTorchSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [pyTorchSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [pyTorchSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {string} [pyTorchSettings.communicationBackend] Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. + * @member {object} [tensorFlowSettings] TensorFlow settings. Settings for + * Tensor Flow job. + * @member {string} [tensorFlowSettings.pythonScriptFilePath] The python script + * to execute. 
+ * @member {string} [tensorFlowSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [tensorFlowSettings.masterCommandLineArgs] Command line + * arguments that need to be passed to the python script for the master task. + * @member {string} [tensorFlowSettings.workerCommandLineArgs] Command line + * arguments that need to be passed to the python script for the worker task. + * Optional for single process jobs. + * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] Command + * line arguments that need to be passed to the python script for the parameter + * server. Optional for single process jobs. + * @member {number} [tensorFlowSettings.workerCount] The number of worker + * tasks. If specified, the value must be less than or equal to (nodeCount * + * numberOfGPUs per VM). If not specified, the default value is equal to + * nodeCount. This property can be specified only for distributed TensorFlow + * training. + * @member {number} [tensorFlowSettings.parameterServerCount] The number of + * parameter server tasks. If specified, the value must be less than or equal + * to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. + * @member {object} [caffeSettings] Caffe settings. Settings for Caffe job. + * @member {string} [caffeSettings.configFilePath] Path of the config file for + * the job. This property cannot be specified if pythonScriptFilePath is + * specified. + * @member {string} [caffeSettings.pythonScriptFilePath] Python script to + * execute. This property cannot be specified if configFilePath is specified. + * @member {string} [caffeSettings.pythonInterpreterPath] The path to the + * Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. 
+ * @member {string} [caffeSettings.commandLineArgs] Command line arguments that + * need to be passed to the Caffe job. + * @member {number} [caffeSettings.processCount] Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property + * @member {object} [caffe2Settings] Caffe2 settings. Settings for Caffe2 job. + * @member {string} [caffe2Settings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [caffe2Settings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [caffe2Settings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {object} [chainerSettings] Chainer settings. Settings for Chainer * job. - * @member {string} [customMpiSettings.commandLine] - * @member {number} [customMpiSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [horovodSettings] Specifies the settings for Horovod job. - * @member {string} [horovodSettings.pythonScriptFilePath] - * @member {string} [horovodSettings.pythonInterpreterPath] - * @member {string} [horovodSettings.commandLineArgs] - * @member {number} [horovodSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [jobPreparation] Specifies the command line to be executed - * before tool kit is launched. The specified actions will run on all the nodes - * that are part of the job - * @member {string} [jobPreparation.commandLine] If containerSettings is - * specified on the job, this commandLine will be executed in the same - * container as job. Otherwise it will be executed on the node. - * @member {string} stdOutErrPathPrefix The path where the Batch AI service - * will upload stdout and stderror of the job. - * @member {array} [inputDirectories] Specifies the list of input directories - * for the Job. 
- * @member {array} [outputDirectories] Specifies the list of output - * directories. - * @member {array} [environmentVariables] Additional environment variables to - * set on the job. Batch AI will setup these additional environment variables - * for the job. - * @member {array} [secrets] Additional environment variables with secret - * values to set on the job. Batch AI will setup these additional environment - * variables for the job. Server will never report values of these variables - * back. + * @member {string} [chainerSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [chainerSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [chainerSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [chainerSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [customToolkitSettings] Custom tool kit job. Settings for + * custom tool kit job. + * @member {string} [customToolkitSettings.commandLine] The command line to + * execute on the master node. + * @member {object} [customMpiSettings] Custom MPI settings. Settings for + * custom MPI job. + * @member {string} [customMpiSettings.commandLine] The command line to be + * executed by mpi runtime on each compute node. + * @member {number} [customMpiSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [horovodSettings] Horovod settings. Settings for Horovod + * job. + * @member {string} [horovodSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [horovodSettings.pythonInterpreterPath] The path to the + * Python interpreter. 
+ * @member {string} [horovodSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [horovodSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [jobPreparation] Job preparation. A command line to be + * executed on each node allocated for the job before tool kit is launched. + * @member {string} [jobPreparation.commandLine] The command line to execute. + * If containerSettings is specified on the job, this commandLine will be + * executed in the same container as job. Otherwise it will be executed on the + * node. + * @member {string} stdOutErrPathPrefix Standard output path prefix. The path + * where the Batch AI service will store stdout, stderror and execution log of + * the job. + * @member {array} [inputDirectories] Input directories. A list of input + * directories for the job. + * @member {array} [outputDirectories] Output directories. A list of output + * directories for the job. + * @member {array} [environmentVariables] Environment variables. A list of user + * defined environment variables which will be setup for the job. + * @member {array} [secrets] Secrets. A list of user defined environment + * variables with secret values which will be setup for the job. Server will + * never report values of these variables back. * @member {object} [constraints] Constraints associated with the Job. - * @member {moment.duration} [constraints.maxWallClockTime] Default Value = 1 - * week. + * @member {moment.duration} [constraints.maxWallClockTime] Max time the job + * can run. Default value: 1 week. */ export interface JobCreateParameters { schedulingPriority?: string; @@ -1644,8 +1865,9 @@ export interface JobCreateParameters { * @constructor * Constraints associated with the Job. * - * @member {moment.duration} [maxWallClockTime] Max time the job can run. - * Default Value = 1 week. 
Default value: moment.duration('7.00:00:00') . + * @member {moment.duration} [maxWallClockTime] Max wall clock time. Max time + * the job can run. Default value: 1 week. Default value: + * moment.duration('7.00:00:00') . */ export interface JobPropertiesConstraints { maxWallClockTime?: moment.Duration; @@ -1655,19 +1877,19 @@ export interface JobPropertiesConstraints { * @class * Initializes a new instance of the JobPropertiesExecutionInfo class. * @constructor - * Contains information about the execution of a job in the Azure Batch - * service. - * - * @member {date} [startTime] The time at which the job started running. - * 'Running' corresponds to the running state. If the job has been restarted or - * retried, this is the most recent time at which the job started running. This - * property is present only for job that are in the running or completed state. - * @member {date} [endTime] The time at which the job completed. This property - * is only returned if the job is in completed state. - * @member {number} [exitCode] The exit code of the job. This property is only - * returned if the job is in completed state. - * @member {array} [errors] Contains details of various errors encountered by - * the service during job execution. + * Information about the execution of a job. + * + * @member {date} [startTime] Start time. The time at which the job started + * running. 'Running' corresponds to the running state. If the job has been + * restarted or retried, this is the most recent time at which the job started + * running. This property is present only for job that are in the running or + * completed state. + * @member {date} [endTime] End time. The time at which the job completed. This + * property is only returned if the job is in completed state. + * @member {number} [exitCode] Exit code. The exit code of the job. This + * property is only returned if the job is in completed state. + * @member {array} [errors] Errors. 
A collection of errors encountered by the + * service during job execution. */ export interface JobPropertiesExecutionInfo { readonly startTime?: Date; @@ -1676,211 +1898,246 @@ export interface JobPropertiesExecutionInfo { readonly errors?: BatchAIError[]; } -/** - * @class - * Initializes a new instance of the ProxyResource class. - * @constructor - * A definition of an Azure proxy resource. - * - * @member {string} [id] The ID of the resource. - * @member {string} [name] The name of the resource. - * @member {string} [type] The type of the resource. - */ -export interface ProxyResource extends BaseResource { - readonly id?: string; - readonly name?: string; - readonly type?: string; -} - /** * @class * Initializes a new instance of the Job class. * @constructor - * Contains information about a Job. + * Information about a Job. * - * @member {string} [priority] Priority associated with the job. Priority - * associated with the job. Possible values include: 'low', 'normal', 'high'. - * Default value: 'normal' . - * @member {object} [cluster] Specifies the Id of the cluster on which this job - * will run. + * @member {string} [schedulingPriority] Scheduling priority. Scheduling + * priority associated with the job. Possible values include: 'low', 'normal', + * 'high'. Default value: 'normal' . + * @member {object} [cluster] Cluster. Resource ID of the cluster associated + * with the job. * @member {string} [cluster.id] The ID of the resource - * @member {object} [mountVolumes] Information on mount volumes to be used by - * the job. These volumes will be mounted before the job execution and will be - * unmouted after the job completion. The volumes will be mounted at location - * specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * @member {array} [mountVolumes.azureFileShares] References to Azure File + * @member {object} [mountVolumes] Mount volumes. Collection of mount volumes + * available to the job during execution. 
These volumes are mounted before the + * job execution and unmounted after the job completion. The volumes are mounted + * at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure Blob - * FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {string} [jobOutputDirectoryPathSegment] A segment of job's output - * directories path created by BatchAI. Batch AI creates job's output - * directories under an unique path to avoid conflicts between jobs. This value - * contains a path segment generated by Batch AI to make the path unique and - * can be used to find the output directory on the node or mounted filesystem. + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. * @member {number} [nodeCount] Number of compute nodes to run the job on. The * job will be gang scheduled on that many compute nodes * @member {object} [containerSettings] If provided the job will run in the * specified container. If the container was downloaded as part of cluster * setup then the same container image will be used. If not provided, the job * will run on the VM. 
- * @member {object} [containerSettings.imageSourceRegistry] - * @member {string} [containerSettings.imageSourceRegistry.serverUrl] - * @member {string} [containerSettings.imageSourceRegistry.image] + * @member {object} [containerSettings.imageSourceRegistry] Information about + * docker image and docker registry to download the container from. + * @member {string} [containerSettings.imageSourceRegistry.serverUrl] URL for + * image repository. + * @member {string} [containerSettings.imageSourceRegistry.image] The name of + * the image in the image repository. * @member {object} [containerSettings.imageSourceRegistry.credentials] + * Credentials to access the private docker repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.username] + * [containerSettings.imageSourceRegistry.credentials.username] User name to + * login to the repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.password] One of password - * or passwordSecretReference must be specified. + * [containerSettings.imageSourceRegistry.credentials.password] User password + * to login to the docker repository. One of password or + * passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Users can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * KeyVault Secret storing the password. Users can store their secrets in Azure + * KeyVault and pass it to the Batch AI service to integrate with KeyVault. One + * of password or passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. 
* @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] * The ID of the resource * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl] + * The URL referencing a secret in the Key Vault. + * @member {string} [containerSettings.shmSize] Size of /dev/shm. Please refer + * to docker documentation for supported argument formats. * @member {string} [toolType] The toolkit type of this job. Possible values * are: cntk, tensorflow, caffe, caffe2, chainer, pytorch, custom, mpi, * horovod. Possible values include: 'cntk', 'tensorflow', 'caffe', 'caffe2', * 'chainer', 'horovod', 'mpi', 'custom' * @member {object} [cntkSettings] Specifies the settings for CNTK (aka * Microsoft Cognitive Toolkit) job. - * @member {string} [cntkSettings.languageType] Valid values are 'BrainScript' - * or 'Python'. - * @member {string} [cntkSettings.configFilePath] This property can be - * specified only if the languageType is 'BrainScript'. - * @member {string} [cntkSettings.pythonScriptFilePath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.pythonInterpreterPath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.commandLineArgs] - * @member {number} [cntkSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [cntkSettings.languageType] The language to use for + * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are + * 'BrainScript' or 'Python'. + * @member {string} [cntkSettings.configFilePath] Specifies the path of the + * BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. + * @member {string} [cntkSettings.pythonScriptFilePath] Python script to + * execute. This property can be specified only if the languageType is + * 'Python'. 
+ * @member {string} [cntkSettings.pythonInterpreterPath] The path to the Python + * interpreter. This property can be specified only if the languageType is + * 'Python'. + * @member {string} [cntkSettings.commandLineArgs] Command line arguments that + * need to be passed to the python script or cntk executable. + * @member {number} [cntkSettings.processCount] Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property * @member {object} [pyTorchSettings] Specifies the settings for pyTorch job. - * @member {string} [pyTorchSettings.pythonScriptFilePath] - * @member {string} [pyTorchSettings.pythonInterpreterPath] - * @member {string} [pyTorchSettings.commandLineArgs] - * @member {number} [pyTorchSettings.processCount] The default value for this - * property is equal to nodeCount property. - * @member {string} [pyTorchSettings.communicationBackend] Valid values are - * 'TCP', 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @member {string} [pyTorchSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [pyTorchSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [pyTorchSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [pyTorchSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {string} [pyTorchSettings.communicationBackend] Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. * @member {object} [tensorFlowSettings] Specifies the settings for Tensor Flow * job. 
- * @member {string} [tensorFlowSettings.pythonScriptFilePath] - * @member {string} [tensorFlowSettings.pythonInterpreterPath] - * @member {string} [tensorFlowSettings.masterCommandLineArgs] - * @member {string} [tensorFlowSettings.workerCommandLineArgs] This property is - * optional for single machine training. - * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] This - * property is optional for single machine training. - * @member {number} [tensorFlowSettings.workerCount] If specified, the value - * must be less than or equal to (nodeCount * numberOfGPUs per VM). If not - * specified, the default value is equal to nodeCount. This property can be - * specified only for distributed TensorFlow training - * @member {number} [tensorFlowSettings.parameterServerCount] If specified, the - * value must be less than or equal to nodeCount. If not specified, the default - * value is equal to 1 for distributed TensorFlow training (This property is - * not applicable for single machine training). This property can be specified - * only for distributed TensorFlow training. + * @member {string} [tensorFlowSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [tensorFlowSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [tensorFlowSettings.masterCommandLineArgs] Command line + * arguments that need to be passed to the python script for the master task. + * @member {string} [tensorFlowSettings.workerCommandLineArgs] Command line + * arguments that need to be passed to the python script for the worker task. + * Optional for single process jobs. + * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] Command + * line arguments that need to be passed to the python script for the parameter + * server. Optional for single process jobs. + * @member {number} [tensorFlowSettings.workerCount] The number of worker + * tasks. 
If specified, the value must be less than or equal to (nodeCount * + * numberOfGPUs per VM). If not specified, the default value is equal to + * nodeCount. This property can be specified only for distributed TensorFlow + * training. + * @member {number} [tensorFlowSettings.parameterServerCount] The number of + * parameter server tasks. If specified, the value must be less than or equal + * to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. * @member {object} [caffeSettings] Specifies the settings for Caffe job. - * @member {string} [caffeSettings.configFilePath] This property cannot be - * specified if pythonScriptFilePath is specified. - * @member {string} [caffeSettings.pythonScriptFilePath] This property cannot - * be specified if configFilePath is specified. - * @member {string} [caffeSettings.pythonInterpreterPath] This property can be - * specified only if the pythonScriptFilePath is specified. - * @member {string} [caffeSettings.commandLineArgs] - * @member {number} [caffeSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [caffeSettings.configFilePath] Path of the config file for + * the job. This property cannot be specified if pythonScriptFilePath is + * specified. + * @member {string} [caffeSettings.pythonScriptFilePath] Python script to + * execute. This property cannot be specified if configFilePath is specified. + * @member {string} [caffeSettings.pythonInterpreterPath] The path to the + * Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. + * @member {string} [caffeSettings.commandLineArgs] Command line arguments that + * need to be passed to the Caffe job. + * @member {number} [caffeSettings.processCount] Number of processes to launch + * for the job execution. 
The default value for this property is equal to + * nodeCount property + * @member {object} [caffe2Settings] Specifies the settings for Caffe2 job. + * @member {string} [caffe2Settings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [caffe2Settings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [caffe2Settings.commandLineArgs] Command line arguments + * that need to be passed to the python script. * @member {object} [chainerSettings] Specifies the settings for Chainer job. - * @member {string} [chainerSettings.pythonScriptFilePath] - * @member {string} [chainerSettings.pythonInterpreterPath] - * @member {string} [chainerSettings.commandLineArgs] - * @member {number} [chainerSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [chainerSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [chainerSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [chainerSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [chainerSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [customToolkitSettings] Specifies the settings for custom * tool kit job. - * @member {string} [customToolkitSettings.commandLine] + * @member {string} [customToolkitSettings.commandLine] The command line to + * execute on the master node. * @member {object} [customMpiSettings] Specifies the settings for custom MPI * job. - * @member {string} [customMpiSettings.commandLine] - * @member {number} [customMpiSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [customMpiSettings.commandLine] The command line to be + * executed by mpi runtime on each compute node. 
+ * @member {number} [customMpiSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [horovodSettings] Specifies the settings for Horovod job. - * @member {string} [horovodSettings.pythonScriptFilePath] - * @member {string} [horovodSettings.pythonInterpreterPath] - * @member {string} [horovodSettings.commandLineArgs] - * @member {number} [horovodSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [horovodSettings.pythonScriptFilePath] The python script to + * execute. + * @member {string} [horovodSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [horovodSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [horovodSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [jobPreparation] Specifies the actions to be performed * before tool kit is launched. The specified actions will run on all the nodes * that are part of the job - * @member {string} [jobPreparation.commandLine] If containerSettings is - * specified on the job, this commandLine will be executed in the same - * container as job. Otherwise it will be executed on the node. - * @member {string} [stdOutErrPathPrefix] The path where the Batch AI service - * will upload stdout and stderror of the job. - * @member {array} [inputDirectories] Specifies the list of input directories - * for the Job. - * @member {array} [outputDirectories] Specifies the list of output directories - * where the models will be created. - * @member {array} [environmentVariables] Additional environment variables to - * set on the job. Batch AI will setup these additional environment variables - * for the job. 
- * @member {array} [secrets] Additional environment variables with secret - * values to set on the job. Batch AI will setup these additional environment - * variables for the job. Server will never report values of these variables - * back. + * @member {string} [jobPreparation.commandLine] The command line to execute. + * If containerSettings is specified on the job, this commandLine will be + * executed in the same container as job. Otherwise it will be executed on the + * node. + * @member {string} [jobOutputDirectoryPathSegment] Output directory path + * segment. A segment of job's output directories path created by Batch AI. + * Batch AI creates job's output directories under an unique path to avoid + * conflicts between jobs. This value contains a path segment generated by + * Batch AI to make the path unique and can be used to find the output + * directory on the node or mounted filesystem. + * @member {string} [stdOutErrPathPrefix] Standard output directory path + * prefix. The path where the Batch AI service stores stdout, stderror and + * execution log of the job. + * @member {array} [inputDirectories] Input directories. A list of input + * directories for the job. + * @member {array} [outputDirectories] Output directories. A list of output + * directories for the job. + * @member {array} [environmentVariables] Environment variables. A collection + * of user defined environment variables to be setup for the job. + * @member {array} [secrets] Secrets. A collection of user defined environment + * variables with secret values to be setup for the job. Server will never + * report values of these variables back. * @member {object} [constraints] Constraints associated with the Job. - * @member {moment.duration} [constraints.maxWallClockTime] Default Value = 1 - * week. - * @member {date} [creationTime] The job creation time. The creation time of - * the job. - * @member {string} [provisioningState] The provisioned state of the Batch AI - * job. 
Possible values include: 'creating', 'succeeded', 'failed', 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the job - * entered its current provisioning state. The time at which the job entered - * its current provisioning state. - * @member {string} [executionState] The current state of the job. The current - * state of the job. Possible values are: queued - The job is queued and able - * to run. A job enters this state when it is created, or when it is awaiting a - * retry after a failed run. running - The job is running on a compute cluster. - * This includes job-level preparation such as downloading resource files or - * set up container specified on the job - it does not necessarily mean that - * the job command line has started executing. terminating - The job is - * terminated by the user, the terminate operation is in progress. succeeded - - * The job has completed running succesfully and exited with exit code 0. - * failed - The job has finished unsuccessfully (failed with a non-zero exit - * code) and has exhausted its retry limit. A job is also marked as failed if - * an error occurred launching the job. Possible values include: 'queued', - * 'running', 'terminating', 'succeeded', 'failed' - * @member {date} [executionStateTransitionTime] The time at which the job - * entered its current execution state. The time at which the job entered its - * current execution state. - * @member {object} [executionInfo] Contains information about the execution of - * a job in the Azure Batch service. - * @member {date} [executionInfo.startTime] 'Running' corresponds to the - * running state. If the job has been restarted or retried, this is the most - * recent time at which the job started running. This property is present only - * for job that are in the running or completed state. - * @member {date} [executionInfo.endTime] This property is only returned if the - * job is in completed state. 
- * @member {number} [executionInfo.exitCode] This property is only returned if - * the job is in completed state. - * @member {array} [executionInfo.errors] + * @member {moment.duration} [constraints.maxWallClockTime] Max time the job + * can run. Default value: 1 week. + * @member {date} [creationTime] Creation time. The creation time of the job. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the Batch AI job. Possible values include: 'creating', 'succeeded', + * 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the job entered its current provisioning + * state. + * @member {string} [executionState] Execution state. The current state of the + * job. Possible values are: queued - The job is queued and able to run. A job + * enters this state when it is created, or when it is awaiting a retry after a + * failed run. running - The job is running on a compute cluster. This includes + * job-level preparation such as downloading resource files or set up container + * specified on the job - it does not necessarily mean that the job command + * line has started executing. terminating - The job is terminated by the user, + * the terminate operation is in progress. succeeded - The job has completed + * running successfully and exited with exit code 0. failed - The job has + * finished unsuccessfully (failed with a non-zero exit code) and has exhausted + * its retry limit. A job is also marked as failed if an error occurred + * launching the job. Possible values include: 'queued', 'running', + * 'terminating', 'succeeded', 'failed' + * @member {date} [executionStateTransitionTime] Execution state transition + * time. The time at which the job entered its current execution state. + * @member {object} [executionInfo] Information about the execution of a job. + * @member {date} [executionInfo.startTime] The time at which the job started + * running. 
'Running' corresponds to the running state. If the job has been + * restarted or retried, this is the most recent time at which the job started + * running. This property is present only for job that are in the running or + * completed state. + * @member {date} [executionInfo.endTime] The time at which the job completed. + * This property is only returned if the job is in completed state. + * @member {number} [executionInfo.exitCode] The exit code of the job. This + * property is only returned if the job is in completed state. + * @member {array} [executionInfo.errors] A collection of errors encountered by + * the service during job execution. */ export interface Job extends ProxyResource { - priority?: string; + schedulingPriority?: string; cluster?: ResourceId; mountVolumes?: MountVolumes; - readonly jobOutputDirectoryPathSegment?: string; nodeCount?: number; containerSettings?: ContainerSettings; toolType?: string; @@ -1888,11 +2145,13 @@ export interface Job extends ProxyResource { pyTorchSettings?: PyTorchSettings; tensorFlowSettings?: TensorFlowSettings; caffeSettings?: CaffeSettings; + caffe2Settings?: Caffe2Settings; chainerSettings?: ChainerSettings; customToolkitSettings?: CustomToolkitSettings; customMpiSettings?: CustomMpiSettings; horovodSettings?: HorovodSettings; jobPreparation?: JobPreparation; + readonly jobOutputDirectoryPathSegment?: string; stdOutErrPathPrefix?: string; inputDirectories?: InputDirectory[]; outputDirectories?: OutputDirectory[]; @@ -1911,11 +2170,12 @@ export interface Job extends ProxyResource { * @class * Initializes a new instance of the RemoteLoginInformation class. * @constructor - * Contains remote login details to SSH/RDP to a compute node in cluster. + * Login details to SSH to a compute node in cluster. * - * @member {string} [nodeId] Id of the compute node - * @member {string} [ipAddress] ip address - * @member {number} [port] port number. + * @member {string} [nodeId] Node ID. ID of the compute node. 
+ * @member {string} [ipAddress] IP address. Public IP address of the compute + * node. + * @member {number} [port] Port. SSH port number of the node. */ export interface RemoteLoginInformation { readonly nodeId?: string; @@ -1929,14 +2189,14 @@ export interface RemoteLoginInformation { * @constructor * Properties of the file or directory. * - * @member {string} [name] Name of the file. - * @member {string} [fileType] Contains information about file type. Possible - * values include: 'file', 'directory' - * @member {string} [downloadUrl] Will contain an URL to download the + * @member {string} [name] Name. Name of the file. + * @member {string} [fileType] File type. Type of the file. Possible values are + * file and directory. Possible values include: 'file', 'directory' + * @member {string} [downloadUrl] Download URL. URL to download the * corresponding file. The downloadUrl is not returned for directories. - * @member {date} [lastModified] The time at which the file was last modified. - * The time at which the file was last modified. - * @member {number} [contentLength] The file size. The file size. + * @member {date} [lastModified] Last modified time. The time at which the file + * was last modified. + * @member {number} [contentLength] Content length. The size of the file. */ export interface File { readonly name?: string; @@ -1946,6 +2206,26 @@ export interface File { readonly contentLength?: number; } +/** + * @class + * Initializes a new instance of the Resource class. + * @constructor + * A definition of an Azure resource. 
+ * + * @member {string} [id] The ID of the resource + * @member {string} [name] The name of the resource + * @member {string} [type] The type of the resource + * @member {string} [location] The location of the resource + * @member {object} [tags] The tags of the resource + */ +export interface Resource extends BaseResource { + readonly id?: string; + readonly name?: string; + readonly type?: string; + readonly location?: string; + readonly tags?: { [propertyName: string]: string }; +} + /** * @class * Initializes a new instance of the OperationDisplay class. @@ -1970,7 +2250,7 @@ export interface OperationDisplay { * @class * Initializes a new instance of the Operation class. * @constructor - * @summary A REST API operation + * @summary A REST API operation. * * Details of a REST API operation * @@ -1996,14 +2276,16 @@ export interface Operation { * @class * Initializes a new instance of the Workspace class. * @constructor - * Describes Batch AI Workspace. + * Batch AI Workspace information. * - * @member {date} [creationTime] Time when the Workspace was created. - * @member {string} [provisioningState] The provisioned state of the workspace. - * Possible values include: 'creating', 'succeeded', 'failed', 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the - * workspace entered its current provisioning state. The time at which the - * workspace entered its current provisioning state. + * @member {date} [creationTime] Creation time. Time when the Workspace was + * created. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the Workspace. Possible values include: 'creating', 'succeeded', + * 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the workspace entered its current + * provisioning state. 
*/ export interface Workspace extends Resource { readonly creationTime?: Date; @@ -2015,10 +2297,11 @@ export interface Workspace extends Resource { * @class * Initializes a new instance of the WorkspaceCreateParameters class. * @constructor - * Parameters supplied to the Create operation. + * Workspace creation parameters. * - * @member {string} location The region in which to create the Workspace. - * @member {object} [tags] The user specified tags associated with the + * @member {string} location Location. The region in which to create the + * Workspace. + * @member {object} [tags] Tags. The user specified tags associated with the * Workspace. */ export interface WorkspaceCreateParameters { @@ -2026,19 +2309,33 @@ export interface WorkspaceCreateParameters { tags?: { [propertyName: string]: string }; } +/** + * @class + * Initializes a new instance of the WorkspaceUpdateParameters class. + * @constructor + * Workspace update parameters. + * + * @member {object} [tags] Tags. The user specified tags associated with the + * Workspace. + */ +export interface WorkspaceUpdateParameters { + tags?: { [propertyName: string]: string }; +} + /** * @class * Initializes a new instance of the Experiment class. * @constructor - * Contains information about the experiment. + * Experiment information. * - * @member {date} [creationTime] Time when the Experiment was created. - * @member {string} [provisioningState] The provisioned state of the - * experiment. Possible values include: 'creating', 'succeeded', 'failed', - * 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the - * experiment entered its current provisioning state. The time at which the - * experiment entered its current provisioning state. + * @member {date} [creationTime] Creation time. Time when the Experiment was + * created. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the experiment. 
Possible values include: 'creating', 'succeeded', + * 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the experiment entered its current + * provisioning state. */ export interface Experiment extends ProxyResource { readonly creationTime?: Date; @@ -2048,66 +2345,75 @@ export interface Experiment extends ProxyResource { /** * @class - * Initializes a new instance of the ClustersListOptions class. + * Initializes a new instance of the WorkspacesListOptions class. * @constructor * Additional parameters for list operation. * * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . */ -export interface ClustersListOptions { +export interface WorkspacesListOptions { maxResults?: number; } /** * @class - * Initializes a new instance of the ClustersListByResourceGroupOptions class. + * Initializes a new instance of the WorkspacesListByResourceGroupOptions class. * @constructor * Additional parameters for listByResourceGroup operation. * * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . */ -export interface ClustersListByResourceGroupOptions { +export interface WorkspacesListByResourceGroupOptions { maxResults?: number; } /** * @class - * Initializes a new instance of the ClustersListByWorkspaceOptions class. + * Initializes a new instance of the ExperimentsListByWorkspaceOptions class. * @constructor * Additional parameters for listByWorkspace operation. * * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . 
*/ -export interface ClustersListByWorkspaceOptions { +export interface ExperimentsListByWorkspaceOptions { maxResults?: number; } /** * @class - * Initializes a new instance of the FileServersListOptions class. + * Initializes a new instance of the JobsListByExperimentOptions class. * @constructor - * Additional parameters for list operation. + * Additional parameters for listByExperiment operation. * * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . */ -export interface FileServersListOptions { +export interface JobsListByExperimentOptions { maxResults?: number; } /** * @class - * Initializes a new instance of the FileServersListByResourceGroupOptions class. + * Initializes a new instance of the JobsListOutputFilesOptions class. * @constructor - * Additional parameters for listByResourceGroup operation. + * Additional parameters for listOutputFiles operation. * + * @member {string} outputdirectoryid Id of the job output directory. This is + * the OutputDirectory-->id parameter that is given by the user during Create + * Job. + * @member {string} [directory] The path to the directory. Default value: '.' . + * @member {number} [linkexpiryinminutes] The number of minutes after which the + * download link will expire. Default value: 60 . * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . */ -export interface FileServersListByResourceGroupOptions { +export interface JobsListOutputFilesOptions { + outputdirectoryid: string; + directory?: string; + linkexpiryinminutes?: number; maxResults?: number; } @@ -2126,75 +2432,14 @@ export interface FileServersListByWorkspaceOptions { /** * @class - * Initializes a new instance of the WorkspacesListOptions class. - * @constructor - * Additional parameters for list operation. 
- * - * @member {number} [maxResults] The maximum number of items to return in the - * response. A maximum of 1000 files can be returned. Default value: 1000 . - */ -export interface WorkspacesListOptions { - maxResults?: number; -} - -/** - * @class - * Initializes a new instance of the WorkspacesListByResourceGroupOptions class. - * @constructor - * Additional parameters for listByResourceGroup operation. - * - * @member {number} [maxResults] The maximum number of items to return in the - * response. A maximum of 1000 files can be returned. Default value: 1000 . - */ -export interface WorkspacesListByResourceGroupOptions { - maxResults?: number; -} - -/** - * @class - * Initializes a new instance of the ExperimentsListByWorkspaceOptions class. + * Initializes a new instance of the ClustersListByWorkspaceOptions class. * @constructor * Additional parameters for listByWorkspace operation. * * @member {number} [maxResults] The maximum number of items to return in the * response. A maximum of 1000 files can be returned. Default value: 1000 . */ -export interface ExperimentsListByWorkspaceOptions { - maxResults?: number; -} - -/** - * @class - * Initializes a new instance of the JobsListByExperimentOptions class. - * @constructor - * Additional parameters for listByExperiment operation. - * - * @member {number} [maxResults] The maximum number of items to return in the - * response. A maximum of 1000 files can be returned. Default value: 1000 . - */ -export interface JobsListByExperimentOptions { - maxResults?: number; -} - -/** - * @class - * Initializes a new instance of the JobsListOutputFilesOptions class. - * @constructor - * Additional parameters for listOutputFiles operation. - * - * @member {string} outputdirectoryid Id of the job output directory. This is - * the OutputDirectory-->id parameter that is given by the user during Create - * Job. - * @member {string} [directory] The path to the directory. Default value: '.' . 
- * @member {number} [linkexpiryinminutes] The number of minutes after which the - * download link will expire. Default value: 60 . - * @member {number} [maxResults] The maximum number of items to return in the - * response. A maximum of 1000 files can be returned. Default value: 1000 . - */ -export interface JobsListOutputFilesOptions { - outputdirectoryid: string; - directory?: string; - linkexpiryinminutes?: number; +export interface ClustersListByWorkspaceOptions { maxResults?: number; } @@ -2230,84 +2475,84 @@ export interface ListUsagesResult extends Array { /** * @class - * Initializes a new instance of the ClusterListResult class. + * Initializes a new instance of the WorkspaceListResult class. * @constructor - * Values returned by the List Clusters operation. + * Values returned by the List operation. * * @member {string} [nextLink] The continuation token. */ -export interface ClusterListResult extends Array { +export interface WorkspaceListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the RemoteLoginInformationListResult class. + * Initializes a new instance of the ExperimentListResult class. * @constructor * Values returned by the List operation. * * @member {string} [nextLink] The continuation token. */ -export interface RemoteLoginInformationListResult extends Array { +export interface ExperimentListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the FileServerListResult class. + * Initializes a new instance of the JobListResult class. * @constructor * Values returned by the List operation. * * @member {string} [nextLink] The continuation token. */ -export interface FileServerListResult extends Array { +export interface JobListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the WorkspaceListResult class. + * Initializes a new instance of the FileListResult class. 
* @constructor * Values returned by the List operation. * * @member {string} [nextLink] The continuation token. */ -export interface WorkspaceListResult extends Array { +export interface FileListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the ExperimentListResult class. + * Initializes a new instance of the RemoteLoginInformationListResult class. * @constructor * Values returned by the List operation. * * @member {string} [nextLink] The continuation token. */ -export interface ExperimentListResult extends Array { +export interface RemoteLoginInformationListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the JobListResult class. + * Initializes a new instance of the FileServerListResult class. * @constructor - * Values returned by the List operation. + * Values returned by the File Server List operation. * * @member {string} [nextLink] The continuation token. */ -export interface JobListResult extends Array { +export interface FileServerListResult extends Array { readonly nextLink?: string; } /** * @class - * Initializes a new instance of the FileListResult class. + * Initializes a new instance of the ClusterListResult class. * @constructor - * Values returned by the List operation. + * Values returned by the List Clusters operation. * * @member {string} [nextLink] The continuation token. 
*/ -export interface FileListResult extends Array { +export interface ClusterListResult extends Array { readonly nextLink?: string; } diff --git a/lib/services/batchaiManagement/lib/models/index.js b/lib/services/batchaiManagement/lib/models/index.js index 4cfac58e09..0d16c21e68 100644 --- a/lib/services/batchaiManagement/lib/models/index.js +++ b/lib/services/batchaiManagement/lib/models/index.js @@ -25,7 +25,7 @@ exports.SshConfiguration = require('./sshConfiguration'); exports.DataDisks = require('./dataDisks'); exports.ResourceId = require('./resourceId'); exports.MountSettings = require('./mountSettings'); -exports.Resource = require('./resource'); +exports.ProxyResource = require('./proxyResource'); exports.FileServer = require('./fileServer'); exports.KeyVaultSecretReference = require('./keyVaultSecretReference'); exports.FileServerCreateParameters = require('./fileServerCreateParameters'); @@ -71,32 +71,29 @@ exports.JobBasePropertiesConstraints = require('./jobBasePropertiesConstraints') exports.JobCreateParameters = require('./jobCreateParameters'); exports.JobPropertiesConstraints = require('./jobPropertiesConstraints'); exports.JobPropertiesExecutionInfo = require('./jobPropertiesExecutionInfo'); -exports.ProxyResource = require('./proxyResource'); exports.Job = require('./job'); exports.RemoteLoginInformation = require('./remoteLoginInformation'); exports.File = require('./file'); +exports.Resource = require('./resource'); exports.OperationDisplay = require('./operationDisplay'); exports.Operation = require('./operation'); exports.Workspace = require('./workspace'); exports.WorkspaceCreateParameters = require('./workspaceCreateParameters'); +exports.WorkspaceUpdateParameters = require('./workspaceUpdateParameters'); exports.Experiment = require('./experiment'); -exports.ClustersListOptions = require('./clustersListOptions'); -exports.ClustersListByResourceGroupOptions = require('./clustersListByResourceGroupOptions'); 
-exports.ClustersListByWorkspaceOptions = require('./clustersListByWorkspaceOptions'); -exports.FileServersListOptions = require('./fileServersListOptions'); -exports.FileServersListByResourceGroupOptions = require('./fileServersListByResourceGroupOptions'); -exports.FileServersListByWorkspaceOptions = require('./fileServersListByWorkspaceOptions'); exports.WorkspacesListOptions = require('./workspacesListOptions'); exports.WorkspacesListByResourceGroupOptions = require('./workspacesListByResourceGroupOptions'); exports.ExperimentsListByWorkspaceOptions = require('./experimentsListByWorkspaceOptions'); exports.JobsListByExperimentOptions = require('./jobsListByExperimentOptions'); exports.JobsListOutputFilesOptions = require('./jobsListOutputFilesOptions'); +exports.FileServersListByWorkspaceOptions = require('./fileServersListByWorkspaceOptions'); +exports.ClustersListByWorkspaceOptions = require('./clustersListByWorkspaceOptions'); exports.OperationListResult = require('./operationListResult'); exports.ListUsagesResult = require('./listUsagesResult'); -exports.ClusterListResult = require('./clusterListResult'); -exports.RemoteLoginInformationListResult = require('./remoteLoginInformationListResult'); -exports.FileServerListResult = require('./fileServerListResult'); exports.WorkspaceListResult = require('./workspaceListResult'); exports.ExperimentListResult = require('./experimentListResult'); exports.JobListResult = require('./jobListResult'); exports.FileListResult = require('./fileListResult'); +exports.RemoteLoginInformationListResult = require('./remoteLoginInformationListResult'); +exports.FileServerListResult = require('./fileServerListResult'); +exports.ClusterListResult = require('./clusterListResult'); diff --git a/lib/services/batchaiManagement/lib/models/inputDirectory.js b/lib/services/batchaiManagement/lib/models/inputDirectory.js index a8c22b5eba..ae930ade3c 100644 --- a/lib/services/batchaiManagement/lib/models/inputDirectory.js +++ 
b/lib/services/batchaiManagement/lib/models/inputDirectory.js @@ -17,10 +17,10 @@ class InputDirectory { /** * Create a InputDirectory. - * @member {string} id The id for the input directory. The path of the input - * directory will be available as a value of an environment variable with - * AZ_BATCHAI_INPUT_ name, where is the value of id attribute. - * @member {string} path The path to the input directory. + * @member {string} id ID. The ID for the input directory. The job can use + * AZ_BATCHAI_INPUT_ environment variable to find the directory path, + * where is the value of id attribute. + * @member {string} path Path. The path to the input directory. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/job.js b/lib/services/batchaiManagement/lib/models/job.js index 9c3c0d88e9..4f8c9c6713 100644 --- a/lib/services/batchaiManagement/lib/models/job.js +++ b/lib/services/batchaiManagement/lib/models/job.js @@ -13,193 +13,244 @@ const models = require('./index'); /** - * Contains information about a Job. + * Information about a Job. * * @extends models['ProxyResource'] */ class Job extends models['ProxyResource'] { /** * Create a Job. - * @member {string} [priority] Priority associated with the job. Priority - * associated with the job. Possible values include: 'low', 'normal', 'high'. - * Default value: 'normal' . - * @member {object} [cluster] Specifies the Id of the cluster on which this - * job will run. + * @member {string} [schedulingPriority] Scheduling priority. Scheduling + * priority associated with the job. Possible values include: 'low', + * 'normal', 'high'. Default value: 'normal' . + * @member {object} [cluster] Cluster. Resource ID of the cluster associated + * with the job. * @member {string} [cluster.id] The ID of the resource - * @member {object} [mountVolumes] Information on mount volumes to be used by - * the job. These volumes will be mounted before the job execution and will - * be unmouted after the job completion. 
The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * @member {array} [mountVolumes.azureFileShares] References to Azure File + * @member {object} [mountVolumes] Mount volumes. Collection of mount volumes + * available to the job during execution. These volumes are mounted before + * the job execution and unmouted after the job completion. The volumes are + * mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment + * variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure - * Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {string} [jobOutputDirectoryPathSegment] A segment of job's output - * directories path created by BatchAI. Batch AI creates job's output - * directories under an unique path to avoid conflicts between jobs. This - * value contains a path segment generated by Batch AI to make the path - * unique and can be used to find the output directory on the node or mounted - * filesystem. + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. * @member {number} [nodeCount] Number of compute nodes to run the job on. * The job will be gang scheduled on that many compute nodes * @member {object} [containerSettings] If provided the job will run in the * specified container. 
If the container was downloaded as part of cluster * setup then the same container image will be used. If not provided, the job * will run on the VM. - * @member {object} [containerSettings.imageSourceRegistry] - * @member {string} [containerSettings.imageSourceRegistry.serverUrl] - * @member {string} [containerSettings.imageSourceRegistry.image] + * @member {object} [containerSettings.imageSourceRegistry] Information about + * docker image and docker registry to download the container from. + * @member {string} [containerSettings.imageSourceRegistry.serverUrl] URL for + * image repository. + * @member {string} [containerSettings.imageSourceRegistry.image] The name of + * the image in the image repository. * @member {object} [containerSettings.imageSourceRegistry.credentials] + * Credentials to access the private docker repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.username] + * [containerSettings.imageSourceRegistry.credentials.username] User name to + * login to the repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.password] One of - * password or passwordSecretReference must be specified. + * [containerSettings.imageSourceRegistry.credentials.password] User password + * to login to the docker repository. One of password or + * passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Users can store their secrets in Azure KeyVault and pass it to the Batch - * AI Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * KeyVault Secret storing the password. Users can store their secrets in + * Azure KeyVault and pass it to the Batch AI service to integrate with + * KeyVault. One of password or passwordSecretReference must be specified. 
* @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault] + * Fully qualified resource indentifier of the Key Vault. * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] * The ID of the resource * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl] + * The URL referencing a secret in the Key Vault. + * @member {string} [containerSettings.shmSize] Size of /dev/shm. Please + * refer to docker documentation for supported argument formats. * @member {string} [toolType] The toolkit type of this job. Possible values * are: cntk, tensorflow, caffe, caffe2, chainer, pytorch, custom, mpi, * horovod. Possible values include: 'cntk', 'tensorflow', 'caffe', 'caffe2', * 'chainer', 'horovod', 'mpi', 'custom' * @member {object} [cntkSettings] Specifies the settings for CNTK (aka * Microsoft Cognitive Toolkit) job. - * @member {string} [cntkSettings.languageType] Valid values are + * @member {string} [cntkSettings.languageType] The language to use for + * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are * 'BrainScript' or 'Python'. - * @member {string} [cntkSettings.configFilePath] This property can be - * specified only if the languageType is 'BrainScript'. - * @member {string} [cntkSettings.pythonScriptFilePath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.pythonInterpreterPath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.commandLineArgs] - * @member {number} [cntkSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [cntkSettings.configFilePath] Specifies the path of the + * BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. 
+ * @member {string} [cntkSettings.pythonScriptFilePath] Python script to + * execute. This property can be specified only if the languageType is + * 'Python'. + * @member {string} [cntkSettings.pythonInterpreterPath] The path to the + * Python interpreter. This property can be specified only if the + * languageType is 'Python'. + * @member {string} [cntkSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script or cntk executable. + * @member {number} [cntkSettings.processCount] Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property * @member {object} [pyTorchSettings] Specifies the settings for pyTorch job. - * @member {string} [pyTorchSettings.pythonScriptFilePath] - * @member {string} [pyTorchSettings.pythonInterpreterPath] - * @member {string} [pyTorchSettings.commandLineArgs] - * @member {number} [pyTorchSettings.processCount] The default value for this - * property is equal to nodeCount property. - * @member {string} [pyTorchSettings.communicationBackend] Valid values are - * 'TCP', 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @member {string} [pyTorchSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [pyTorchSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [pyTorchSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [pyTorchSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {string} [pyTorchSettings.communicationBackend] Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. * @member {object} [tensorFlowSettings] Specifies the settings for Tensor * Flow job. 
- * @member {string} [tensorFlowSettings.pythonScriptFilePath] - * @member {string} [tensorFlowSettings.pythonInterpreterPath] - * @member {string} [tensorFlowSettings.masterCommandLineArgs] - * @member {string} [tensorFlowSettings.workerCommandLineArgs] This property - * is optional for single machine training. - * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] This - * property is optional for single machine training. - * @member {number} [tensorFlowSettings.workerCount] If specified, the value - * must be less than or equal to (nodeCount * numberOfGPUs per VM). If not - * specified, the default value is equal to nodeCount. This property can be - * specified only for distributed TensorFlow training - * @member {number} [tensorFlowSettings.parameterServerCount] If specified, - * the value must be less than or equal to nodeCount. If not specified, the - * default value is equal to 1 for distributed TensorFlow training (This - * property is not applicable for single machine training). This property can - * be specified only for distributed TensorFlow training. + * @member {string} [tensorFlowSettings.pythonScriptFilePath] The python + * script to execute. + * @member {string} [tensorFlowSettings.pythonInterpreterPath] The path to + * the Python interpreter. + * @member {string} [tensorFlowSettings.masterCommandLineArgs] Command line + * arguments that need to be passed to the python script for the master task. + * @member {string} [tensorFlowSettings.workerCommandLineArgs] Command line + * arguments that need to be passed to the python script for the worker task. + * Optional for single process jobs. + * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] + * Command line arguments that need to be passed to the python script for the + * parameter server. Optional for single process jobs. + * @member {number} [tensorFlowSettings.workerCount] The number of worker + * tasks. 
If specified, the value must be less than or equal to (nodeCount * + * numberOfGPUs per VM). If not specified, the default value is equal to + * nodeCount. This property can be specified only for distributed TensorFlow + * training. + * @member {number} [tensorFlowSettings.parameterServerCount] The number of + * parameter server tasks. If specified, the value must be less than or equal + * to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. * @member {object} [caffeSettings] Specifies the settings for Caffe job. - * @member {string} [caffeSettings.configFilePath] This property cannot be - * specified if pythonScriptFilePath is specified. - * @member {string} [caffeSettings.pythonScriptFilePath] This property cannot - * be specified if configFilePath is specified. - * @member {string} [caffeSettings.pythonInterpreterPath] This property can - * be specified only if the pythonScriptFilePath is specified. - * @member {string} [caffeSettings.commandLineArgs] - * @member {number} [caffeSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [caffeSettings.configFilePath] Path of the config file + * for the job. This property cannot be specified if pythonScriptFilePath is + * specified. + * @member {string} [caffeSettings.pythonScriptFilePath] Python script to + * execute. This property cannot be specified if configFilePath is specified. + * @member {string} [caffeSettings.pythonInterpreterPath] The path to the + * Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. + * @member {string} [caffeSettings.commandLineArgs] Command line arguments + * that need to be passed to the Caffe job. + * @member {number} [caffeSettings.processCount] Number of processes to + * launch for the job execution. 
The default value for this property is equal + * to nodeCount property + * @member {object} [caffe2Settings] Specifies the settings for Caffe2 job. + * @member {string} [caffe2Settings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [caffe2Settings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [caffe2Settings.commandLineArgs] Command line arguments + * that need to be passed to the python script. * @member {object} [chainerSettings] Specifies the settings for Chainer job. - * @member {string} [chainerSettings.pythonScriptFilePath] - * @member {string} [chainerSettings.pythonInterpreterPath] - * @member {string} [chainerSettings.commandLineArgs] - * @member {number} [chainerSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [chainerSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [chainerSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [chainerSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [chainerSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [customToolkitSettings] Specifies the settings for custom * tool kit job. - * @member {string} [customToolkitSettings.commandLine] + * @member {string} [customToolkitSettings.commandLine] The command line to + * execute on the master node. * @member {object} [customMpiSettings] Specifies the settings for custom MPI * job. - * @member {string} [customMpiSettings.commandLine] - * @member {number} [customMpiSettings.processCount] The default value for - * this property is equal to nodeCount property + * @member {string} [customMpiSettings.commandLine] The command line to be + * executed by mpi runtime on each compute node. 
+ * @member {number} [customMpiSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [horovodSettings] Specifies the settings for Horovod job. - * @member {string} [horovodSettings.pythonScriptFilePath] - * @member {string} [horovodSettings.pythonInterpreterPath] - * @member {string} [horovodSettings.commandLineArgs] - * @member {number} [horovodSettings.processCount] The default value for this - * property is equal to nodeCount property + * @member {string} [horovodSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [horovodSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [horovodSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [horovodSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property * @member {object} [jobPreparation] Specifies the actions to be performed * before tool kit is launched. The specified actions will run on all the * nodes that are part of the job - * @member {string} [jobPreparation.commandLine] If containerSettings is - * specified on the job, this commandLine will be executed in the same - * container as job. Otherwise it will be executed on the node. - * @member {string} [stdOutErrPathPrefix] The path where the Batch AI service - * will upload stdout and stderror of the job. - * @member {array} [inputDirectories] Specifies the list of input directories - * for the Job. - * @member {array} [outputDirectories] Specifies the list of output - * directories where the models will be created. - * @member {array} [environmentVariables] Additional environment variables to - * set on the job. Batch AI will setup these additional environment variables - * for the job. 
- * @member {array} [secrets] Additional environment variables with secret - * values to set on the job. Batch AI will setup these additional environment - * variables for the job. Server will never report values of these variables - * back. + * @member {string} [jobPreparation.commandLine] The command line to execute. + * If containerSettings is specified on the job, this commandLine will be + * executed in the same container as job. Otherwise it will be executed on + * the node. + * @member {string} [jobOutputDirectoryPathSegment] Output directory path + * segment. A segment of job's output directories path created by Batch AI. + * Batch AI creates job's output directories under an unique path to avoid + * conflicts between jobs. This value contains a path segment generated by + * Batch AI to make the path unique and can be used to find the output + * directory on the node or mounted filesystem. + * @member {string} [stdOutErrPathPrefix] Standard output directory path + * prefix. The path where the Batch AI service stores stdout, stderror and + * execution log of the job. + * @member {array} [inputDirectories] Input directories. A list of input + * directories for the job. + * @member {array} [outputDirectories] Output directories. A list of output + * directories for the job. + * @member {array} [environmentVariables] Environment variables. A collection + * of user defined environment variables to be setup for the job. + * @member {array} [secrets] Secrets. A collection of user defined + * environment variables with secret values to be setup for the job. Server + * will never report values of these variables back. * @member {object} [constraints] Constraints associated with the Job. - * @member {moment.duration} [constraints.maxWallClockTime] Default Value = 1 - * week. - * @member {date} [creationTime] The job creation time. The creation time of - * the job. - * @member {string} [provisioningState] The provisioned state of the Batch AI - * job. 
Possible values include: 'creating', 'succeeded', 'failed', - * 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the job - * entered its current provisioning state. The time at which the job entered - * its current provisioning state. - * @member {string} [executionState] The current state of the job. The - * current state of the job. Possible values are: queued - The job is queued - * and able to run. A job enters this state when it is created, or when it is - * awaiting a retry after a failed run. running - The job is running on a - * compute cluster. This includes job-level preparation such as downloading - * resource files or set up container specified on the job - it does not - * necessarily mean that the job command line has started executing. - * terminating - The job is terminated by the user, the terminate operation - * is in progress. succeeded - The job has completed running succesfully and - * exited with exit code 0. failed - The job has finished unsuccessfully - * (failed with a non-zero exit code) and has exhausted its retry limit. A - * job is also marked as failed if an error occurred launching the job. - * Possible values include: 'queued', 'running', 'terminating', 'succeeded', - * 'failed' - * @member {date} [executionStateTransitionTime] The time at which the job - * entered its current execution state. The time at which the job entered its - * current execution state. - * @member {object} [executionInfo] Contains information about the execution - * of a job in the Azure Batch service. - * @member {date} [executionInfo.startTime] 'Running' corresponds to the - * running state. If the job has been restarted or retried, this is the most - * recent time at which the job started running. This property is present - * only for job that are in the running or completed state. - * @member {date} [executionInfo.endTime] This property is only returned if - * the job is in completed state. 
- * @member {number} [executionInfo.exitCode] This property is only returned - * if the job is in completed state. - * @member {array} [executionInfo.errors] + * @member {moment.duration} [constraints.maxWallClockTime] Max time the job + * can run. Default value: 1 week. + * @member {date} [creationTime] Creation time. The creation time of the job. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the Batch AI job. Possible values include: 'creating', + * 'succeeded', 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the job entered its current + * provisioning state. + * @member {string} [executionState] Execution state. The current state of + * the job. Possible values are: queued - The job is queued and able to run. + * A job enters this state when it is created, or when it is awaiting a retry + * after a failed run. running - The job is running on a compute cluster. + * This includes job-level preparation such as downloading resource files or + * set up container specified on the job - it does not necessarily mean that + * the job command line has started executing. terminating - The job is + * terminated by the user, the terminate operation is in progress. succeeded + * - The job has completed running succesfully and exited with exit code 0. + * failed - The job has finished unsuccessfully (failed with a non-zero exit + * code) and has exhausted its retry limit. A job is also marked as failed if + * an error occurred launching the job. Possible values include: 'queued', + * 'running', 'terminating', 'succeeded', 'failed' + * @member {date} [executionStateTransitionTime] Execution state transition + * time. The time at which the job entered its current execution state. + * @member {object} [executionInfo] Information about the execution of a job. + * @member {date} [executionInfo.startTime] The time at which the job started + * running. 
'Running' corresponds to the running state. If the job has been + * restarted or retried, this is the most recent time at which the job + * started running. This property is present only for job that are in the + * running or completed state. + * @member {date} [executionInfo.endTime] The time at which the job + * completed. This property is only returned if the job is in completed + * state. + * @member {number} [executionInfo.exitCode] The exit code of the job. This + * property is only returned if the job is in completed state. + * @member {array} [executionInfo.errors] A collection of errors encountered + * by the service during job execution. */ constructor() { super(); @@ -243,9 +294,9 @@ class Job extends models['ProxyResource'] { name: 'String' } }, - priority: { + schedulingPriority: { required: false, - serializedName: 'properties.priority', + serializedName: 'properties.schedulingPriority', defaultValue: 'normal', type: { name: 'String' @@ -267,14 +318,6 @@ class Job extends models['ProxyResource'] { className: 'MountVolumes' } }, - jobOutputDirectoryPathSegment: { - required: false, - readOnly: true, - serializedName: 'properties.jobOutputDirectoryPathSegment', - type: { - name: 'String' - } - }, nodeCount: { required: false, serializedName: 'properties.nodeCount', @@ -329,6 +372,14 @@ class Job extends models['ProxyResource'] { className: 'CaffeSettings' } }, + caffe2Settings: { + required: false, + serializedName: 'properties.caffe2Settings', + type: { + name: 'Composite', + className: 'Caffe2Settings' + } + }, chainerSettings: { required: false, serializedName: 'properties.chainerSettings', @@ -369,6 +420,14 @@ class Job extends models['ProxyResource'] { className: 'JobPreparation' } }, + jobOutputDirectoryPathSegment: { + required: false, + readOnly: true, + serializedName: 'properties.jobOutputDirectoryPathSegment', + type: { + name: 'String' + } + }, stdOutErrPathPrefix: { required: false, serializedName: 'properties.stdOutErrPathPrefix', diff 
--git a/lib/services/batchaiManagement/lib/models/jobBasePropertiesConstraints.js b/lib/services/batchaiManagement/lib/models/jobBasePropertiesConstraints.js index fa4945f26a..aba020c0b2 100644 --- a/lib/services/batchaiManagement/lib/models/jobBasePropertiesConstraints.js +++ b/lib/services/batchaiManagement/lib/models/jobBasePropertiesConstraints.js @@ -17,8 +17,9 @@ class JobBasePropertiesConstraints { /** * Create a JobBasePropertiesConstraints. - * @member {moment.duration} [maxWallClockTime] Max time the job can run. - * Default Value = 1 week. Default value: moment.duration('7.00:00:00') . + * @member {moment.duration} [maxWallClockTime] Max wall clock time. Max time + * the job can run. Default value: 1 week. Default value: + * moment.duration('7.00:00:00') . */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/jobCreateParameters.js b/lib/services/batchaiManagement/lib/models/jobCreateParameters.js index 4f6a76fc50..17fefbaa02 100644 --- a/lib/services/batchaiManagement/lib/models/jobCreateParameters.js +++ b/lib/services/batchaiManagement/lib/models/jobCreateParameters.js @@ -13,150 +13,201 @@ const models = require('./index'); /** - * Parameters supplied to the Create operation. + * Job creation parameters. * */ class JobCreateParameters { /** * Create a JobCreateParameters. - * @member {string} [schedulingPriority] Scheduling priority associated with - * the job. Scheduling priority associated with the job. Possible values - * include: 'low', 'normal', 'high'. Default value: 'normal' . - * @member {object} cluster Specifies the Id of the cluster on which this job - * will run. + * @member {string} [schedulingPriority] Scheduling priority. Scheduling + * priority associated with the job. Possible values: low, normal, high. + * Possible values include: 'low', 'normal', 'high'. Default value: 'normal' + * . + * @member {object} cluster Cluster. Resource ID of the cluster on which this + * job will run. 
* @member {string} [cluster.id] The ID of the resource - * @member {object} [mountVolumes] Information on mount volumes to be used by - * the job. These volumes will be mounted before the job execution and will - * be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * @member {array} [mountVolumes.azureFileShares] References to Azure File + * @member {object} [mountVolumes] Mount volumes. Information on mount + * volumes to be used by the job. These volumes will be mounted before the + * job execution and will be unmouted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure - * Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {number} nodeCount Number of compute nodes to run the job on. The - * job will be gang scheduled on that many compute nodes - * @member {object} [containerSettings] If provided the job will run in the - * specified container. If the container was downloaded as part of cluster - * setup then the same container image will be used. If not provided, the job - * will run on the VM. - * @member {object} [containerSettings.imageSourceRegistry] - * @member {string} [containerSettings.imageSourceRegistry.serverUrl] - * @member {string} [containerSettings.imageSourceRegistry.image] + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. 
+ * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. + * @member {number} nodeCount Node count. Number of compute nodes to run the + * job on. The job will be gang scheduled on that many compute nodes. + * @member {object} [containerSettings] Container settings. Docker container + * settings for the job. If not provided, the job will run directly on the + * node. + * @member {object} [containerSettings.imageSourceRegistry] Information about + * docker image and docker registry to download the container from. + * @member {string} [containerSettings.imageSourceRegistry.serverUrl] URL for + * image repository. + * @member {string} [containerSettings.imageSourceRegistry.image] The name of + * the image in the image repository. * @member {object} [containerSettings.imageSourceRegistry.credentials] + * Credentials to access the private docker repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.username] + * [containerSettings.imageSourceRegistry.credentials.username] User name to + * login to the repository. * @member {string} - * [containerSettings.imageSourceRegistry.credentials.password] One of - * password or passwordSecretReference must be specified. + * [containerSettings.imageSourceRegistry.credentials.password] User password + * to login to the docker repository. One of password or + * passwordSecretReference must be specified. * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Users can store their secrets in Azure KeyVault and pass it to the Batch - * AI Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * KeyVault Secret storing the password. Users can store their secrets in + * Azure KeyVault and pass it to the Batch AI service to integrate with + * KeyVault. One of password or passwordSecretReference must be specified. 
 * @member {object} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id] * The ID of the resource * @member {string} * [containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl] - * @member {object} [cntkSettings] Specifies the settings for CNTK (aka + * The URL referencing a secret in the Key Vault. + * @member {string} [containerSettings.shmSize] Size of /dev/shm. Please + * refer to docker documentation for supported argument formats. + * @member {object} [cntkSettings] CNTK settings. Settings for CNTK (aka * Microsoft Cognitive Toolkit) job. - * @member {string} [cntkSettings.languageType] Valid values are + * @member {string} [cntkSettings.languageType] The language to use for + * launching CNTK (aka Microsoft Cognitive Toolkit) job. Valid values are * 'BrainScript' or 'Python'. - * @member {string} [cntkSettings.configFilePath] This property can be - * specified only if the languageType is 'BrainScript'. - * @member {string} [cntkSettings.pythonScriptFilePath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.pythonInterpreterPath] This property can be - * specified only if the languageType is 'Python'. - * @member {string} [cntkSettings.commandLineArgs] - * @member {number} [cntkSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [pyTorchSettings] Specifies the settings for pyTorch job. - * @member {string} [pyTorchSettings.pythonScriptFilePath] - * @member {string} [pyTorchSettings.pythonInterpreterPath] - * @member {string} [pyTorchSettings.commandLineArgs] - * @member {number} [pyTorchSettings.processCount] The default value for this - * property is equal to nodeCount property. 
- * @member {string} [pyTorchSettings.communicationBackend] Valid values are - * 'TCP', 'Gloo' or 'MPI'. Not required for non-distributed jobs. - * @member {object} [tensorFlowSettings] Specifies the settings for Tensor - * Flow job. - * @member {string} [tensorFlowSettings.pythonScriptFilePath] - * @member {string} [tensorFlowSettings.pythonInterpreterPath] - * @member {string} [tensorFlowSettings.masterCommandLineArgs] - * @member {string} [tensorFlowSettings.workerCommandLineArgs] This property - * is optional for single machine training. - * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] This - * property is optional for single machine training. - * @member {number} [tensorFlowSettings.workerCount] If specified, the value - * must be less than or equal to (nodeCount * numberOfGPUs per VM). If not - * specified, the default value is equal to nodeCount. This property can be - * specified only for distributed TensorFlow training - * @member {number} [tensorFlowSettings.parameterServerCount] If specified, - * the value must be less than or equal to nodeCount. If not specified, the - * default value is equal to 1 for distributed TensorFlow training (This - * property is not applicable for single machine training). This property can - * be specified only for distributed TensorFlow training. - * @member {object} [caffeSettings] Specifies the settings for Caffe job. - * @member {string} [caffeSettings.configFilePath] This property cannot be - * specified if pythonScriptFilePath is specified. - * @member {string} [caffeSettings.pythonScriptFilePath] This property cannot - * be specified if configFilePath is specified. - * @member {string} [caffeSettings.pythonInterpreterPath] This property can - * be specified only if the pythonScriptFilePath is specified. 
- * @member {string} [caffeSettings.commandLineArgs] - * @member {number} [caffeSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [caffe2Settings] Specifies the settings for Caffe2 job. - * @member {string} [caffe2Settings.pythonScriptFilePath] - * @member {string} [caffe2Settings.pythonInterpreterPath] - * @member {string} [caffe2Settings.commandLineArgs] - * @member {object} [chainerSettings] Specifies the settings for Chainer job. - * @member {string} [chainerSettings.pythonScriptFilePath] - * @member {string} [chainerSettings.pythonInterpreterPath] - * @member {string} [chainerSettings.commandLineArgs] - * @member {number} [chainerSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [customToolkitSettings] Specifies the settings for custom - * tool kit job. - * @member {string} [customToolkitSettings.commandLine] - * @member {object} [customMpiSettings] Specifies the settings for custom MPI + * @member {string} [cntkSettings.configFilePath] Specifies the path of the + * BrainScript config file. This property can be specified only if the + * languageType is 'BrainScript'. + * @member {string} [cntkSettings.pythonScriptFilePath] Python script to + * execute. This property can be specified only if the languageType is + * 'Python'. + * @member {string} [cntkSettings.pythonInterpreterPath] The path to the + * Python interpreter. This property can be specified only if the + * languageType is 'Python'. + * @member {string} [cntkSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script or cntk executable. + * @member {number} [cntkSettings.processCount] Number of processes to launch + * for the job execution. The default value for this property is equal to + * nodeCount property + * @member {object} [pyTorchSettings] pyTorch settings. Settings for pyTorch + * job. 
+ * @member {string} [pyTorchSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [pyTorchSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [pyTorchSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [pyTorchSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {string} [pyTorchSettings.communicationBackend] Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. + * @member {object} [tensorFlowSettings] TensorFlow settings. Settings for + * Tensor Flow job. + * @member {string} [tensorFlowSettings.pythonScriptFilePath] The python + * script to execute. + * @member {string} [tensorFlowSettings.pythonInterpreterPath] The path to + * the Python interpreter. + * @member {string} [tensorFlowSettings.masterCommandLineArgs] Command line + * arguments that need to be passed to the python script for the master task. + * @member {string} [tensorFlowSettings.workerCommandLineArgs] Command line + * arguments that need to be passed to the python script for the worker task. + * Optional for single process jobs. + * @member {string} [tensorFlowSettings.parameterServerCommandLineArgs] + * Command line arguments that need to be passed to the python script for the + * parameter server. Optional for single process jobs. + * @member {number} [tensorFlowSettings.workerCount] The number of worker + * tasks. If specified, the value must be less than or equal to (nodeCount * + * numberOfGPUs per VM). If not specified, the default value is equal to + * nodeCount. This property can be specified only for distributed TensorFlow + * training. + * @member {number} [tensorFlowSettings.parameterServerCount] The number of + * parameter server tasks. 
If specified, the value must be less than or equal + * to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. + * @member {object} [caffeSettings] Caffe settings. Settings for Caffe job. + * @member {string} [caffeSettings.configFilePath] Path of the config file + * for the job. This property cannot be specified if pythonScriptFilePath is + * specified. + * @member {string} [caffeSettings.pythonScriptFilePath] Python script to + * execute. This property cannot be specified if configFilePath is specified. + * @member {string} [caffeSettings.pythonInterpreterPath] The path to the + * Python interpreter. The property can be specified only if the + * pythonScriptFilePath is specified. + * @member {string} [caffeSettings.commandLineArgs] Command line arguments + * that need to be passed to the Caffe job. + * @member {number} [caffeSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [caffe2Settings] Caffe2 settings. Settings for Caffe2 + * job. + * @member {string} [caffe2Settings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [caffe2Settings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [caffe2Settings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {object} [chainerSettings] Chainer settings. Settings for Chainer + * job. + * @member {string} [chainerSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [chainerSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [chainerSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. 
+ * @member {number} [chainerSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [customToolkitSettings] Custom tool kit job. Settings for + * custom tool kit job. + * @member {string} [customToolkitSettings.commandLine] The command line to + * execute on the master node. + * @member {object} [customMpiSettings] Custom MPI settings. Settings for + * custom MPI job. + * @member {string} [customMpiSettings.commandLine] The command line to be + * executed by mpi runtime on each compute node. + * @member {number} [customMpiSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [horovodSettings] Horovod settings. Settings for Horovod * job. - * @member {string} [customMpiSettings.commandLine] - * @member {number} [customMpiSettings.processCount] The default value for - * this property is equal to nodeCount property - * @member {object} [horovodSettings] Specifies the settings for Horovod job. - * @member {string} [horovodSettings.pythonScriptFilePath] - * @member {string} [horovodSettings.pythonInterpreterPath] - * @member {string} [horovodSettings.commandLineArgs] - * @member {number} [horovodSettings.processCount] The default value for this - * property is equal to nodeCount property - * @member {object} [jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on - * all the nodes that are part of the job - * @member {string} [jobPreparation.commandLine] If containerSettings is - * specified on the job, this commandLine will be executed in the same - * container as job. Otherwise it will be executed on the node. - * @member {string} stdOutErrPathPrefix The path where the Batch AI service - * will upload stdout and stderror of the job. 
- * @member {array} [inputDirectories] Specifies the list of input directories - * for the Job. - * @member {array} [outputDirectories] Specifies the list of output - * directories. - * @member {array} [environmentVariables] Additional environment variables to - * set on the job. Batch AI will setup these additional environment variables - * for the job. - * @member {array} [secrets] Additional environment variables with secret - * values to set on the job. Batch AI will setup these additional environment - * variables for the job. Server will never report values of these variables - * back. + * @member {string} [horovodSettings.pythonScriptFilePath] The python script + * to execute. + * @member {string} [horovodSettings.pythonInterpreterPath] The path to the + * Python interpreter. + * @member {string} [horovodSettings.commandLineArgs] Command line arguments + * that need to be passed to the python script. + * @member {number} [horovodSettings.processCount] Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {object} [jobPreparation] Job preparation. A command line to be + * executed on each node allocated for the job before tool kit is launched. + * @member {string} [jobPreparation.commandLine] The command line to execute. + * If containerSettings is specified on the job, this commandLine will be + * executed in the same container as job. Otherwise it will be executed on + * the node. + * @member {string} stdOutErrPathPrefix Standard output path prefix. The path + * where the Batch AI service will store stdout, stderror and execution log + * of the job. + * @member {array} [inputDirectories] Input directories. A list of input + * directories for the job. + * @member {array} [outputDirectories] Output directories. A list of output + * directories for the job. + * @member {array} [environmentVariables] Environment variables. 
A list of + * user defined environment variables which will be setup for the job. + * @member {array} [secrets] Secrets. A list of user defined environment + * variables with secret values which will be setup for the job. Server will + * never report values of these variables back. * @member {object} [constraints] Constraints associated with the Job. - * @member {moment.duration} [constraints.maxWallClockTime] Default Value = 1 - * week. + * @member {moment.duration} [constraints.maxWallClockTime] Max time the job + * can run. Default value: 1 week. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/jobPreparation.js b/lib/services/batchaiManagement/lib/models/jobPreparation.js index 46e3e82b95..b975eb097e 100644 --- a/lib/services/batchaiManagement/lib/models/jobPreparation.js +++ b/lib/services/batchaiManagement/lib/models/jobPreparation.js @@ -11,13 +11,13 @@ 'use strict'; /** - * Specifies the settings for job preparation. + * Job preparation settings. * */ class JobPreparation { /** * Create a JobPreparation. - * @member {string} commandLine The command line to execute. If + * @member {string} commandLine Command line. The command line to execute. If * containerSettings is specified on the job, this commandLine will be * executed in the same container as job. Otherwise it will be executed on * the node. diff --git a/lib/services/batchaiManagement/lib/models/jobPropertiesConstraints.js b/lib/services/batchaiManagement/lib/models/jobPropertiesConstraints.js index e439450bcd..9240e5a565 100644 --- a/lib/services/batchaiManagement/lib/models/jobPropertiesConstraints.js +++ b/lib/services/batchaiManagement/lib/models/jobPropertiesConstraints.js @@ -17,8 +17,9 @@ class JobPropertiesConstraints { /** * Create a JobPropertiesConstraints. - * @member {moment.duration} [maxWallClockTime] Max time the job can run. - * Default Value = 1 week. Default value: moment.duration('7.00:00:00') . 
+ * @member {moment.duration} [maxWallClockTime] Max wall clock time. Max time + * the job can run. Default value: 1 week. Default value: + * moment.duration('7.00:00:00') . */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/jobPropertiesExecutionInfo.js b/lib/services/batchaiManagement/lib/models/jobPropertiesExecutionInfo.js index 6f20b24814..906457124e 100644 --- a/lib/services/batchaiManagement/lib/models/jobPropertiesExecutionInfo.js +++ b/lib/services/batchaiManagement/lib/models/jobPropertiesExecutionInfo.js @@ -13,24 +13,23 @@ const models = require('./index'); /** - * Contains information about the execution of a job in the Azure Batch - * service. + * Information about the execution of a job. * */ class JobPropertiesExecutionInfo { /** * Create a JobPropertiesExecutionInfo. - * @member {date} [startTime] The time at which the job started running. - * 'Running' corresponds to the running state. If the job has been restarted - * or retried, this is the most recent time at which the job started running. - * This property is present only for job that are in the running or completed - * state. - * @member {date} [endTime] The time at which the job completed. This + * @member {date} [startTime] Start time. The time at which the job started + * running. 'Running' corresponds to the running state. If the job has been + * restarted or retried, this is the most recent time at which the job + * started running. This property is present only for job that are in the + * running or completed state. + * @member {date} [endTime] End time. The time at which the job completed. + * This property is only returned if the job is in completed state. + * @member {number} [exitCode] Exit code. The exit code of the job. This * property is only returned if the job is in completed state. - * @member {number} [exitCode] The exit code of the job. This property is - * only returned if the job is in completed state. 
- * @member {array} [errors] Contains details of various errors encountered by - * the service during job execution. + * @member {array} [errors] Errors. A collection of errors encountered by the + * service during job execution. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/keyVaultSecretReference.js b/lib/services/batchaiManagement/lib/models/keyVaultSecretReference.js index 01153ab50b..70a06a9553 100644 --- a/lib/services/batchaiManagement/lib/models/keyVaultSecretReference.js +++ b/lib/services/batchaiManagement/lib/models/keyVaultSecretReference.js @@ -13,16 +13,17 @@ const models = require('./index'); /** - * Describes a reference to Key Vault Secret. + * Key Vault Secret reference. * */ class KeyVaultSecretReference { /** * Create a KeyVaultSecretReference. - * @member {object} sourceVault Fully qualified resource Id for the Key - * Vault. + * @member {object} sourceVault Key Vault resource identifier. Fully + * qualified resource identifier of the Key Vault. * @member {string} [sourceVault.id] The ID of the resource - * @member {string} secretUrl The URL referencing a secret in a Key Vault. + * @member {string} secretUrl Secret URL. The URL referencing a secret in the + * Key Vault. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/manualScaleSettings.js b/lib/services/batchaiManagement/lib/models/manualScaleSettings.js index c246b44e60..9a48c18518 100644 --- a/lib/services/batchaiManagement/lib/models/manualScaleSettings.js +++ b/lib/services/batchaiManagement/lib/models/manualScaleSettings.js @@ -17,12 +17,11 @@ class ManualScaleSettings { /** * Create a ManualScaleSettings. - * @member {number} targetNodeCount The desired number of compute nodes in - * the Cluster. Default is 0. If autoScaleSettings are not specified, then - * the Cluster starts with this target. Default value: 0 . 
- * @member {string} [nodeDeallocationOption] Determines what to do with the - * job(s) running on compute node if the Cluster size is decreasing. The - * default value is requeue. Possible values include: 'requeue', 'terminate', + * @member {number} targetNodeCount Target node count. The desired number of + * compute nodes in the Cluster. Default is 0. Default value: 0 . + * @member {string} [nodeDeallocationOption] Node deallocation options. An + * action to be performed when the cluster size is decreasing. The default + * value is requeue. Possible values include: 'requeue', 'terminate', * 'waitforjobcompletion'. Default value: 'requeue' . */ constructor() { diff --git a/lib/services/batchaiManagement/lib/models/mountSettings.js b/lib/services/batchaiManagement/lib/models/mountSettings.js index 21f985a485..5f25cf9fab 100644 --- a/lib/services/batchaiManagement/lib/models/mountSettings.js +++ b/lib/services/batchaiManagement/lib/models/mountSettings.js @@ -11,16 +11,20 @@ 'use strict'; /** - * Details of the File Server. + * File Server mount Information. * */ class MountSettings { /** * Create a MountSettings. - * @member {string} [mountPoint] Path where the NFS is mounted on the Server. - * @member {string} [fileServerPublicIP] Public IP of the File Server VM. - * @member {string} [fileServerInternalIP] Internal subnet IP which can be - * used to access the file Server from within the subnet. + * @member {string} [mountPoint] Mount Point. Path where the data disks are + * mounted on the File Server. + * @member {string} [fileServerPublicIP] Public IP. Public IP address of the + * File Server which can be used to SSH to the node from outside of the + * subnet. + * @member {string} [fileServerInternalIP] Internal IP. Internal IP address + * of the File Server which can be used to access the File Server from within + * the subnet. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/mountVolumes.js b/lib/services/batchaiManagement/lib/models/mountVolumes.js index 8afa2a2564..ee6fa6a3b5 100644 --- a/lib/services/batchaiManagement/lib/models/mountVolumes.js +++ b/lib/services/batchaiManagement/lib/models/mountVolumes.js @@ -19,16 +19,16 @@ const models = require('./index'); class MountVolumes { /** * Create a MountVolumes. - * @member {array} [azureFileShares] Azure File Share setup configuration. - * References to Azure File Shares that are to be mounted to the cluster + * @member {array} [azureFileShares] Azure File Shares. A collection of Azure + * File Shares that are to be mounted to the cluster nodes. + * @member {array} [azureBlobFileSystems] Azure Blob file systems. A + * collection of Azure Blob Containers that are to be mounted to the cluster + * nodes. + * @member {array} [fileServers] File Servers. A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [unmanagedFileSystems] Unmanaged file systems. A + * collection of unmanaged file systems that are to be mounted to the cluster * nodes. - * @member {array} [azureBlobFileSystems] Azure Blob FileSystem setup - * configuration. References to Azure Blob FUSE that are to be mounted to the - * cluster nodes. - * @member {array} [fileServers] References to a list of file servers that - * are mounted to the cluster node. - * @member {array} [unmanagedFileSystems] References to a list of file - * servers that are mounted to the cluster node. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/nameValuePair.js b/lib/services/batchaiManagement/lib/models/nameValuePair.js index ce4609124e..da016b094d 100644 --- a/lib/services/batchaiManagement/lib/models/nameValuePair.js +++ b/lib/services/batchaiManagement/lib/models/nameValuePair.js @@ -11,14 +11,14 @@ 'use strict'; /** - * Represents a name-value pair. + * Name-value pair. 
* */ class NameValuePair { /** * Create a NameValuePair. - * @member {string} [name] The name in the name-value pair. - * @member {string} [value] The value in the name-value pair. + * @member {string} [name] Name. The name in the name-value pair. + * @member {string} [value] Value. The value in the name-value pair. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/nodeSetup.js b/lib/services/batchaiManagement/lib/models/nodeSetup.js index 0a90973737..8f77ea1b26 100644 --- a/lib/services/batchaiManagement/lib/models/nodeSetup.js +++ b/lib/services/batchaiManagement/lib/models/nodeSetup.js @@ -13,67 +13,75 @@ const models = require('./index'); /** - * Use this to prepare the VM. NOTE: The volumes specified in mountVolumes are - * mounted first and then the setupTask is run. Therefore the setup task can - * use local mountPaths in its execution. + * Node setup settings. * */ class NodeSetup { /** * Create a NodeSetup. - * @member {object} [setupTask] Specifies a setup task which can be used to - * customize the compute nodes of the cluster. The NodeSetup task runs - * everytime a VM is rebooted. For that reason the task code needs to be - * idempotent. Generally it is used to either download static data that is - * required for all jobs that run on the cluster VMs or to download/install - * software. - * @member {string} [setupTask.commandLine] Command line to be executed on - * each cluster's node after it being allocated or rebooted. The command is - * executed in a bash subshell as a root. - * @member {array} [setupTask.environmentVariables] - * @member {array} [setupTask.secrets] Server will never report values of - * these variables back. + * @member {object} [setupTask] Setup task. Setup task to run on cluster + * nodes when nodes got created or rebooted. The setup task code needs to be + * idempotent. 
Generally the setup task is used to download static data that + * is required for all jobs that run on the cluster VMs and/or to + * download/install software. + * @member {string} [setupTask.commandLine] The command line to be executed + * on each cluster's node after it being allocated or rebooted. The command + * is executed in a bash subshell as a root. + * @member {array} [setupTask.environmentVariables] A collection of user + * defined environment variables to be set for setup task. + * @member {array} [setupTask.secrets] A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. * @member {string} [setupTask.stdOutErrPathPrefix] The prefix of a path - * where the Batch AI service will upload the stdout and stderr of the setup - * task. - * @member {string} [setupTask.stdOutErrPathSuffix] Batch AI creates the + * where the Batch AI service will upload the stdout, stderr and execution + * log of the setup task. + * @member {string} [setupTask.stdOutErrPathSuffix] A path segment appended + * by Batch AI to stdOutErrPathPrefix to form a path where stdout, stderr and + * execution log of the setup task will be uploaded. Batch AI creates the * setup task output directories under an unique path to avoid conflicts - * between different clusters. You can concatinate stdOutErrPathPrefix and - * stdOutErrPathSuffix to get the full path to the output directory. - * @member {object} [mountVolumes] Information on shared volumes to be used - * by jobs. Specified mount volumes will be available to all jobs executing - * on the cluster. The volumes will be mounted at location specified by - * $AZ_BATCHAI_MOUNT_ROOT environment variable. - * @member {array} [mountVolumes.azureFileShares] References to Azure File + * between different clusters. The full path can be obtained by concatenation + * of stdOutErrPathPrefix and stdOutErrPathSuffix. 
+ * @member {object} [mountVolumes] Mount volumes. Mount volumes to be + * available to setup task and all jobs executing on the cluster. The volumes + * will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. + * @member {array} [mountVolumes.azureFileShares] A collection of Azure File * Shares that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.azureBlobFileSystems] References to Azure - * Blob FUSE that are to be mounted to the cluster nodes. - * @member {array} [mountVolumes.fileServers] - * @member {array} [mountVolumes.unmanagedFileSystems] - * @member {object} [performanceCountersSettings] Specifies settings for - * performance counters collecting and uploading. - * @member {object} [performanceCountersSettings.appInsightsReference] If + * @member {array} [mountVolumes.azureBlobFileSystems] A collection of Azure + * Blob Containers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.fileServers] A collection of Batch AI File + * Servers that are to be mounted to the cluster nodes. + * @member {array} [mountVolumes.unmanagedFileSystems] A collection of + * unmanaged file systems that are to be mounted to the cluster nodes. + * @member {object} [performanceCountersSettings] Performance counters + * settings. Settings for performance counters collecting and uploading. + * @member {object} [performanceCountersSettings.appInsightsReference] Azure + * Application Insights information for performance counters reporting. If * provided, Batch AI will upload node performance counters to the * corresponding Azure Application Insights account. * @member {object} - * [performanceCountersSettings.appInsightsReference.component] + * [performanceCountersSettings.appInsightsReference.component] Azure + * Application Insights component resource ID. 
 * @member {string} * [performanceCountersSettings.appInsightsReference.component.id] The ID of * the resource * @member {string} * [performanceCountersSettings.appInsightsReference.instrumentationKey] + * Value of the Azure Application Insights instrumentation key. * @member {object} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault] + * Fully qualified resource identifier of the Key Vault. * @member {string} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} * [performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl] + * The URL referencing a secret in the Key Vault. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/nodeStateCounts.js b/lib/services/batchaiManagement/lib/models/nodeStateCounts.js index 1e16885861..a8a35141b0 100644 --- a/lib/services/batchaiManagement/lib/models/nodeStateCounts.js +++ b/lib/services/batchaiManagement/lib/models/nodeStateCounts.js @@ -17,15 +17,16 @@ class NodeStateCounts { /** * Create a NodeStateCounts. - * @member {number} [idleNodeCount] Number of compute nodes in idle state. - * @member {number} [runningNodeCount] Number of compute nodes which are - * running jobs. - * @member {number} [preparingNodeCount] Number of compute nodes which are - * being prepared. - * @member {number} [unusableNodeCount] Number of compute nodes which are - * unusable. 
- * @member {number} [leavingNodeCount] Number of compute nodes which are - * leaving the cluster. + * @member {number} [idleNodeCount] Idle node count. Number of compute nodes + * in idle state. + * @member {number} [runningNodeCount] Running node count. Number of compute + * nodes which are running jobs. + * @member {number} [preparingNodeCount] Preparing node count. Number of + * compute nodes which are being prepared. + * @member {number} [unusableNodeCount] Unusable node count. Number of + * compute nodes which are in unusable state. + * @member {number} [leavingNodeCount] Leaving node count. Number of compute + * nodes which are leaving the cluster. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/operation.js b/lib/services/batchaiManagement/lib/models/operation.js index 358b1de82b..629409c4db 100644 --- a/lib/services/batchaiManagement/lib/models/operation.js +++ b/lib/services/batchaiManagement/lib/models/operation.js @@ -13,7 +13,7 @@ const models = require('./index'); /** - * @summary A REST API operation + * @summary A REST API operation. * * Details of a REST API operation * diff --git a/lib/services/batchaiManagement/lib/models/outputDirectory.js b/lib/services/batchaiManagement/lib/models/outputDirectory.js index 3f2cedf29e..e310a25565 100644 --- a/lib/services/batchaiManagement/lib/models/outputDirectory.js +++ b/lib/services/batchaiManagement/lib/models/outputDirectory.js @@ -17,19 +17,18 @@ class OutputDirectory { /** * Create a OutputDirectory. - * @member {string} id The name for the output directory. The path of the - * output directory will be available as a value of an environment variable - * with AZ_BATCHAI_OUTPUT_ name, where is the value of id attribute. - * @member {string} pathPrefix The prefix path where the output directory - * will be created. NOTE: This is an absolute path to prefix. E.g. - * $AZ_BATCHAI_MOUNT_ROOT/MyNFS/MyLogs. 
You can find the full path to the - * output directory by combining pathPrefix, jobOutputDirectoryPathSegment - * (reported by get job) and pathSuffix. - * @member {string} [pathSuffix] The suffix path where the output directory - * will be created. The suffix path where the output directory will be - * created. E.g. models. You can find the full path to the output directory + * @member {string} id ID. The ID of the output directory. The job can use + * AZ_BATCHAI_OUTPUT_ environment variale to find the directory path, + * where is the value of id attribute. + * @member {string} pathPrefix Path prefix. The prefix path where the output + * directory will be created. Note, this is an absolute path to prefix. E.g. + * $AZ_BATCHAI_MOUNT_ROOT/MyNFS/MyLogs. The full path to the output directory * by combining pathPrefix, jobOutputDirectoryPathSegment (reported by get * job) and pathSuffix. + * @member {string} [pathSuffix] Path suffix. The suffix path where the + * output directory will be created. E.g. models. You can find the full path + * to the output directory by combining pathPrefix, + * jobOutputDirectoryPathSegment (reported by get job) and pathSuffix. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/performanceCountersSettings.js b/lib/services/batchaiManagement/lib/models/performanceCountersSettings.js index 1c20eb9f11..320d3cfab3 100644 --- a/lib/services/batchaiManagement/lib/models/performanceCountersSettings.js +++ b/lib/services/batchaiManagement/lib/models/performanceCountersSettings.js @@ -19,25 +19,29 @@ const models = require('./index'); class PerformanceCountersSettings { /** * Create a PerformanceCountersSettings. - * @member {object} appInsightsReference Specifies Azure Application Insights - * information for performance counters reporting. If provided, Batch AI will - * upload node performance counters to the corresponding Azure Application - * Insights account. 
- * @member {object} [appInsightsReference.component] + * @member {object} appInsightsReference Azure Application Insights + * reference. Azure Application Insights information for performance counters + * reporting. If provided, Batch AI will upload node performance counters to + * the corresponding Azure Application Insights account. + * @member {object} [appInsightsReference.component] Azure Application + * Insights component resource ID. * @member {string} [appInsightsReference.component.id] The ID of the * resource - * @member {string} [appInsightsReference.instrumentationKey] + * @member {string} [appInsightsReference.instrumentationKey] Value of the + * Azure Application Insights instrumentation key. * @member {object} [appInsightsReference.instrumentationKeySecretReference] - * Specifies KeyVault Store and Secret which contains Azure Application - * Insights instrumentation key. One of instumentationKey or + * KeyVault Store and Secret which contains Azure Application Insights + * instrumentation key. One of instrumentationKey or * instrumentationKeySecretReference must be specified. * @member {object} - * [appInsightsReference.instrumentationKeySecretReference.sourceVault] + * [appInsightsReference.instrumentationKeySecretReference.sourceVault] Fully + * qualified resource indentifier of the Key Vault. * @member {string} * [appInsightsReference.instrumentationKeySecretReference.sourceVault.id] * The ID of the resource * @member {string} - * [appInsightsReference.instrumentationKeySecretReference.secretUrl] + * [appInsightsReference.instrumentationKeySecretReference.secretUrl] The URL + * referencing a secret in the Key Vault. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/privateRegistryCredentials.js b/lib/services/batchaiManagement/lib/models/privateRegistryCredentials.js index 5496d25d71..1514e52c04 100644 --- a/lib/services/batchaiManagement/lib/models/privateRegistryCredentials.js +++ b/lib/services/batchaiManagement/lib/models/privateRegistryCredentials.js @@ -19,17 +19,19 @@ const models = require('./index'); class PrivateRegistryCredentials { /** * Create a PrivateRegistryCredentials. - * @member {string} username User name to login. - * @member {string} [password] Password to login. One of password or - * passwordSecretReference must be specified. - * @member {object} [passwordSecretReference] Specifies the location of the - * password, which is a Key Vault Secret. Users can store their secrets in - * Azure KeyVault and pass it to the Batch AI Service to integrate with + * @member {string} username User name. User name to login to the repository. + * @member {string} [password] Password. User password to login to the docker + * repository. One of password or passwordSecretReference must be specified. + * @member {object} [passwordSecretReference] Password secret reference. + * KeyVault Secret storing the password. Users can store their secrets in + * Azure KeyVault and pass it to the Batch AI service to integrate with * KeyVault. One of password or passwordSecretReference must be specified. - * @member {object} [passwordSecretReference.sourceVault] + * @member {object} [passwordSecretReference.sourceVault] Fully qualified + * resource indentifier of the Key Vault. * @member {string} [passwordSecretReference.sourceVault.id] The ID of the * resource - * @member {string} [passwordSecretReference.secretUrl] + * @member {string} [passwordSecretReference.secretUrl] The URL referencing a + * secret in the Key Vault. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/pyTorchSettings.js b/lib/services/batchaiManagement/lib/models/pyTorchSettings.js index fa93215c71..cec20d6eb6 100644 --- a/lib/services/batchaiManagement/lib/models/pyTorchSettings.js +++ b/lib/services/batchaiManagement/lib/models/pyTorchSettings.js @@ -11,23 +11,24 @@ 'use strict'; /** - * Specifies the settings for pyTorch job. + * pyTorch job settings. * */ class PyTorchSettings { /** * Create a PyTorchSettings. - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [commandLineArgs] Specifies the command line arguments - * for the master task. - * @member {number} [processCount] Number of processes to launch for the job - * execution. The default value for this property is equal to nodeCount - * property. - * @member {string} [communicationBackend] Type of the communication backend - * for distributed jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not - * required for non-distributed jobs. + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. + * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [commandLineArgs] Command line arguments. Command line + * arguments that need to be passed to the python script. + * @member {number} [processCount] Process count. Number of processes to + * launch for the job execution. The default value for this property is equal + * to nodeCount property + * @member {string} [communicationBackend] Communication backend. Type of the + * communication backend for distributed jobs. Valid values are 'TCP', 'Gloo' + * or 'MPI'. Not required for non-distributed jobs. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/remoteLoginInformation.js b/lib/services/batchaiManagement/lib/models/remoteLoginInformation.js index a5d5ba250e..244f2e1437 100644 --- a/lib/services/batchaiManagement/lib/models/remoteLoginInformation.js +++ b/lib/services/batchaiManagement/lib/models/remoteLoginInformation.js @@ -11,15 +11,16 @@ 'use strict'; /** - * Contains remote login details to SSH/RDP to a compute node in cluster. + * Login details to SSH to a compute node in cluster. * */ class RemoteLoginInformation { /** * Create a RemoteLoginInformation. - * @member {string} [nodeId] Id of the compute node - * @member {string} [ipAddress] ip address - * @member {number} [port] port number. + * @member {string} [nodeId] Node ID. ID of the compute node. + * @member {string} [ipAddress] IP address. Public IP address of the compute + * node. + * @member {number} [port] Port. SSH port number of the node. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/scaleSettings.js b/lib/services/batchaiManagement/lib/models/scaleSettings.js index dfb815c9de..6a159dd307 100644 --- a/lib/services/batchaiManagement/lib/models/scaleSettings.js +++ b/lib/services/batchaiManagement/lib/models/scaleSettings.js @@ -22,18 +22,24 @@ const models = require('./index'); class ScaleSettings { /** * Create a ScaleSettings. - * @member {object} [manual] The scale for the cluster by manual settings. - * @member {number} [manual.targetNodeCount] Default is 0. If - * autoScaleSettings are not specified, then the Cluster starts with this - * target. - * @member {string} [manual.nodeDeallocationOption] The default value is - * requeue. Possible values include: 'requeue', 'terminate', - * 'waitforjobcompletion' - * @member {object} [autoScale] The scale for the cluster by autoscale - * settings. 
- * @member {number} [autoScale.minimumNodeCount] - * @member {number} [autoScale.maximumNodeCount] - * @member {number} [autoScale.initialNodeCount] + * @member {object} [manual] Manual scale settings. Manual scale settings for + * the cluster. + * @member {number} [manual.targetNodeCount] The desired number of compute + * nodes in the Cluster. Default is 0. + * @member {string} [manual.nodeDeallocationOption] An action to be performed + * when the cluster size is decreasing. The default value is requeue. + * Possible values include: 'requeue', 'terminate', 'waitforjobcompletion' + * @member {object} [autoScale] Auto-scale settings. Auto-scale settings for + * the cluster. + * @member {number} [autoScale.minimumNodeCount] The minimum number of + * compute nodes the Batch AI service will try to allocate for the cluster. + * Note, the actual number of nodes can be less than the specified value if + * the subscription has not enough quota to fulfill the request. + * @member {number} [autoScale.maximumNodeCount] The maximum number of + * compute nodes the cluster can have. + * @member {number} [autoScale.initialNodeCount] The number of compute nodes + * to allocate on cluster creation. Note that this value is used only during + * cluster creation. Default: 0. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/setupTask.js b/lib/services/batchaiManagement/lib/models/setupTask.js index a0ecc169d9..be42c9fc5e 100644 --- a/lib/services/batchaiManagement/lib/models/setupTask.js +++ b/lib/services/batchaiManagement/lib/models/setupTask.js @@ -20,23 +20,23 @@ const models = require('./index'); class SetupTask { /** * Create a SetupTask. - * @member {string} commandLine Command line to be executed on each cluster's - * node after it being allocated or rebooted. Command line to be executed on - * each cluster's node after it being allocated or rebooted. The command is - * executed in a bash subshell as a root. 
- * @member {array} [environmentVariables] Collection of environment variables - * to be set for setup task. - * @member {array} [secrets] Collection of environment variables with secret - * values to be set for setup task. Server will never report values of these - * variables back. - * @member {string} stdOutErrPathPrefix The prefix of a path where the Batch - * AI service will upload the stdout and stderr of the setup task. - * @member {string} [stdOutErrPathSuffix] A path segment appended by Batch AI - * to stdOutErrPathPrefix to form a path where stdout and stderr of the setup - * task will be uploaded. Batch AI creates the setup task output directories - * under an unique path to avoid conflicts between different clusters. You - * can concatinate stdOutErrPathPrefix and stdOutErrPathSuffix to get the - * full path to the output directory. + * @member {string} commandLine Command line. The command line to be executed + * on each cluster's node after it being allocated or rebooted. The command + * is executed in a bash subshell as a root. + * @member {array} [environmentVariables] Environment variables. A collection + * of user defined environment variables to be set for setup task. + * @member {array} [secrets] Secrets. A collection of user defined + * environment variables with secret values to be set for the setup task. + * Server will never report values of these variables back. + * @member {string} stdOutErrPathPrefix Output path prefix. The prefix of a + * path where the Batch AI service will upload the stdout, stderr and + * execution log of the setup task. + * @member {string} [stdOutErrPathSuffix] Output path suffix. A path segment + * appended by Batch AI to stdOutErrPathPrefix to form a path where stdout, + * stderr and execution log of the setup task will be uploaded. Batch AI + * creates the setup task output directories under an unique path to avoid + * conflicts between different clusters. 
The full path can be obtained by + * concatenation of stdOutErrPathPrefix and stdOutErrPathSuffix. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/sshConfiguration.js b/lib/services/batchaiManagement/lib/models/sshConfiguration.js index 00482bba5c..1a5d605a86 100644 --- a/lib/services/batchaiManagement/lib/models/sshConfiguration.js +++ b/lib/services/batchaiManagement/lib/models/sshConfiguration.js @@ -13,20 +13,25 @@ const models = require('./index'); /** - * SSH configuration settings for the VM + * SSH configuration. * */ class SshConfiguration { /** * Create a SshConfiguration. - * @member {array} [publicIPsToAllow] List of source IP ranges to allow SSH - * connection to a node. Default value is '*' can be used to match all source - * IPs. Maximum number of IP ranges that can be specified are 400. - * @member {object} userAccountSettings Settings for user account to be - * created on a node. - * @member {string} [userAccountSettings.adminUserName] - * @member {string} [userAccountSettings.adminUserSshPublicKey] - * @member {string} [userAccountSettings.adminUserPassword] + * @member {array} [publicIPsToAllow] Allowed public IPs. List of source IP + * ranges to allow SSH connection from. The default value is '*' (all source + * IPs are allowed). Maximum number of IP ranges that can be specified is + * 400. + * @member {object} userAccountSettings User account settings. Settings for + * administrator user account to be created on a node. The account can be + * used to establish SSH connection to the node. + * @member {string} [userAccountSettings.adminUserName] Name of the + * administrator user account which can be used to SSH to nodes. + * @member {string} [userAccountSettings.adminUserSshPublicKey] SSH public + * key of the administrator user account. + * @member {string} [userAccountSettings.adminUserPassword] Password of the + * administrator user account. 
*/ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/tensorFlowSettings.js b/lib/services/batchaiManagement/lib/models/tensorFlowSettings.js index 3ae05c1b98..7b9211ead9 100644 --- a/lib/services/batchaiManagement/lib/models/tensorFlowSettings.js +++ b/lib/services/batchaiManagement/lib/models/tensorFlowSettings.js @@ -11,32 +11,35 @@ 'use strict'; /** - * Specifies the settings for TensorFlow job. + * TensorFlow job settings. * */ class TensorFlowSettings { /** * Create a TensorFlowSettings. - * @member {string} pythonScriptFilePath The path and file name of the python - * script to execute the job. - * @member {string} [pythonInterpreterPath] The path to python interpreter. - * @member {string} [masterCommandLineArgs] Specifies the command line - * arguments for the master task. - * @member {string} [workerCommandLineArgs] Specifies the command line - * arguments for the worker task. This property is optional for single - * machine training. - * @member {string} [parameterServerCommandLineArgs] Specifies the command - * line arguments for the parameter server task. This property is optional - * for single machine training. - * @member {number} [workerCount] The number of worker tasks. If specified, - * the value must be less than or equal to (nodeCount * numberOfGPUs per VM). - * If not specified, the default value is equal to nodeCount. This property - * can be specified only for distributed TensorFlow training - * @member {number} [parameterServerCount] The number of parmeter server - * tasks. If specified, the value must be less than or equal to nodeCount. If - * not specified, the default value is equal to 1 for distributed TensorFlow - * training (This property is not applicable for single machine training). - * This property can be specified only for distributed TensorFlow training. + * @member {string} pythonScriptFilePath Python script file path. The python + * script to execute. 
+ * @member {string} [pythonInterpreterPath] Python interpreter path. The path + * to the Python interpreter. + * @member {string} [masterCommandLineArgs] Master command line arguments. + * Command line arguments that need to be passed to the python script for the + * master task. + * @member {string} [workerCommandLineArgs] Worker command line arguments. + * Command line arguments that need to be passed to the python script for the + * worker task. Optional for single process jobs. + * @member {string} [parameterServerCommandLineArgs] Parameter server command + * line arguments. Command line arguments that need to be passed to the + * python script for the parameter server. Optional for single process jobs. + * @member {number} [workerCount] Worker count. The number of worker tasks. + * If specified, the value must be less than or equal to (nodeCount * + * numberOfGPUs per VM). If not specified, the default value is equal to + * nodeCount. This property can be specified only for distributed TensorFlow + * training. + * @member {number} [parameterServerCount] Parameter server count. The number + * of parameter server tasks. If specified, the value must be less than or + * equal to nodeCount. If not specified, the default value is equal to 1 for + * distributed TensorFlow training. This property can be specified only for + * distributed TensorFlow training. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/unmanagedFileSystemReference.js b/lib/services/batchaiManagement/lib/models/unmanagedFileSystemReference.js index 01449e675a..178ac024f8 100644 --- a/lib/services/batchaiManagement/lib/models/unmanagedFileSystemReference.js +++ b/lib/services/batchaiManagement/lib/models/unmanagedFileSystemReference.js @@ -11,19 +11,19 @@ 'use strict'; /** - * Details of the file system to mount on the compute cluster nodes. + * Unmananged file system mounting configuration. 
* */ class UnmanagedFileSystemReference { /** * Create a UnmanagedFileSystemReference. - * @member {string} mountCommand Command used to mount the unmanaged file - * system. - * @member {string} relativeMountPath Specifies the relative path on the - * compute cluster node where the file system will be mounted. Note that all - * cluster level unmanaged file system will be mounted under - * $AZ_BATCHAI_MOUNT_ROOT location and job level unmanaged file system will - * be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT. + * @member {string} mountCommand Mount command. Mount command line. Note, + * Batch AI will append mount path to the command on its own. + * @member {string} relativeMountPath Relative mount path. The relative path + * on the compute node where the unmanaged file system will be mounted. Note + * that all cluster level unmanaged file systems will be mounted under + * $AZ_BATCHAI_MOUNT_ROOT location and all job level unmanaged file systems + * will be mounted under $AZ_BATCHAI_JOB_MOUNT_ROOT. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/userAccountSettings.js b/lib/services/batchaiManagement/lib/models/userAccountSettings.js index 09e6103b31..8c0d582222 100644 --- a/lib/services/batchaiManagement/lib/models/userAccountSettings.js +++ b/lib/services/batchaiManagement/lib/models/userAccountSettings.js @@ -18,13 +18,12 @@ class UserAccountSettings { /** * Create a UserAccountSettings. - * @member {string} adminUserName Specifies the name of the administrator - * account. - * @member {string} [adminUserSshPublicKey] SSH public keys used to - * authenticate with linux based VMs. This does not get returned in a GET - * response body. - * @member {string} [adminUserPassword] Admin user Password (linux only). - * This does not get returned in a GET response body. + * @member {string} adminUserName User name. Name of the administrator user + * account which can be used to SSH to nodes. 
+ * @member {string} [adminUserSshPublicKey] SSH public key. SSH public key of + * the administrator user account. + * @member {string} [adminUserPassword] Password. Password of the + * administrator user account. */ constructor() { } diff --git a/lib/services/batchaiManagement/lib/models/virtualMachineConfiguration.js b/lib/services/batchaiManagement/lib/models/virtualMachineConfiguration.js index a12fcabe1c..43749ca347 100644 --- a/lib/services/batchaiManagement/lib/models/virtualMachineConfiguration.js +++ b/lib/services/batchaiManagement/lib/models/virtualMachineConfiguration.js @@ -13,21 +13,25 @@ const models = require('./index'); /** - * Settings for OS image. + * VM configuration. * */ class VirtualMachineConfiguration { /** * Create a VirtualMachineConfiguration. - * @member {object} [imageReference] Reference to OS image. - * @member {string} [imageReference.publisher] - * @member {string} [imageReference.offer] - * @member {string} [imageReference.sku] - * @member {string} [imageReference.version] - * @member {string} [imageReference.virtualMachineImageId] The virtual - * machine image must be in the same region and subscription as the cluster. - * For information about the firewall settings for the Batch node agent to - * communicate with the Batch service see + * @member {object} [imageReference] Image reference. OS image reference for + * cluster nodes. + * @member {string} [imageReference.publisher] Publisher of the image. + * @member {string} [imageReference.offer] Offer of the image. + * @member {string} [imageReference.sku] SKU of the image. + * @member {string} [imageReference.version] Version of the image. + * @member {string} [imageReference.virtualMachineImageId] The ARM resource + * identifier of the virtual machine image for the compute nodes. This is of + * the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. 
+ * The virtual machine image must be in the same region and subscription as + * the cluster. For information about the firewall settings for the Batch + * node agent to communicate with the Batch service see * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. diff --git a/lib/services/batchaiManagement/lib/models/workspace.js b/lib/services/batchaiManagement/lib/models/workspace.js index fc5aee7839..76e715b45a 100644 --- a/lib/services/batchaiManagement/lib/models/workspace.js +++ b/lib/services/batchaiManagement/lib/models/workspace.js @@ -13,20 +13,21 @@ const models = require('./index'); /** - * Describes Batch AI Workspace. + * Batch AI Workspace information. * * @extends models['Resource'] */ class Workspace extends models['Resource'] { /** * Create a Workspace. - * @member {date} [creationTime] Time when the Workspace was created. - * @member {string} [provisioningState] The provisioned state of the - * workspace. Possible values include: 'creating', 'succeeded', 'failed', - * 'deleting' - * @member {date} [provisioningStateTransitionTime] The time at which the - * workspace entered its current provisioning state. The time at which the - * workspace entered its current provisioning state. + * @member {date} [creationTime] Creation time. Time when the Workspace was + * created. + * @member {string} [provisioningState] Provisioning state. The provisioned + * state of the Workspace. Possible values include: 'creating', 'succeeded', + * 'failed', 'deleting' + * @member {date} [provisioningStateTransitionTime] Provisioning state + * transition time. The time at which the workspace entered its current + * provisioning state. 
*/ constructor() { super(); diff --git a/lib/services/batchaiManagement/lib/models/workspaceCreateParameters.js b/lib/services/batchaiManagement/lib/models/workspaceCreateParameters.js index ed7ab79592..f234c4de3b 100644 --- a/lib/services/batchaiManagement/lib/models/workspaceCreateParameters.js +++ b/lib/services/batchaiManagement/lib/models/workspaceCreateParameters.js @@ -11,14 +11,15 @@ 'use strict'; /** - * Parameters supplied to the Create operation. + * Workspace creation parameters. * */ class WorkspaceCreateParameters { /** * Create a WorkspaceCreateParameters. - * @member {string} location The region in which to create the Workspace. - * @member {object} [tags] The user specified tags associated with the + * @member {string} location Location. The region in which to create the + * Workspace. + * @member {object} [tags] Tags. The user specified tags associated with the * Workspace. */ constructor() { diff --git a/lib/services/batchaiManagement/lib/models/workspaceUpdateParameters.js b/lib/services/batchaiManagement/lib/models/workspaceUpdateParameters.js new file mode 100644 index 0000000000..6a8261cb0a --- /dev/null +++ b/lib/services/batchaiManagement/lib/models/workspaceUpdateParameters.js @@ -0,0 +1,60 @@ +/* + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. See License.txt in the project root for + * license information. + * + * Code generated by Microsoft (R) AutoRest Code Generator. + * Changes may cause incorrect behavior and will be lost if the code is + * regenerated. + */ + +'use strict'; + +/** + * Workspace update parameters. + * + */ +class WorkspaceUpdateParameters { + /** + * Create a WorkspaceUpdateParameters. + * @member {object} [tags] Tags. The user specified tags associated with the + * Workspace. 
+ */ + constructor() { + } + + /** + * Defines the metadata of WorkspaceUpdateParameters + * + * @returns {object} metadata of WorkspaceUpdateParameters + * + */ + mapper() { + return { + required: false, + serializedName: 'WorkspaceUpdateParameters', + type: { + name: 'Composite', + className: 'WorkspaceUpdateParameters', + modelProperties: { + tags: { + required: false, + serializedName: 'tags', + type: { + name: 'Dictionary', + value: { + required: false, + serializedName: 'StringElementType', + type: { + name: 'String' + } + } + } + } + } + } + }; + } +} + +module.exports = WorkspaceUpdateParameters; diff --git a/lib/services/batchaiManagement/lib/operations/clusters.js b/lib/services/batchaiManagement/lib/operations/clusters.js index 675cc2af56..b3aedcf222 100644 --- a/lib/services/batchaiManagement/lib/operations/clusters.js +++ b/lib/services/batchaiManagement/lib/operations/clusters.js @@ -14,592 +14,9 @@ const msRest = require('ms-rest'); const msRestAzure = require('ms-rest-azure'); const WebResource = msRest.WebResource; -/** - * Gets a list of Clusters associated with the given subscription. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListOptions] Additional parameters for the - * operation - * - * @param {number} [options.clustersListOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. 
- * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ -function _list(options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - let clustersListOptions = (options && options.clustersListOptions !== undefined) ? options.clustersListOptions : undefined; - // Validate - try { - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - let maxResults; - try { - if (clustersListOptions !== null && clustersListOptions !== undefined) - { - maxResults = clustersListOptions.maxResults; - if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/clusters'; - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } - if (queryParameters.length > 0) { - requestUrl += '?' + queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = 
parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['ClusterListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - -/** - * Gets a list of Clusters within the specified resource group. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.clustersListByResourceGroupOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. 
- * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ -function _listByResourceGroup(resourceGroupName, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - let clustersListByResourceGroupOptions = (options && options.clustersListByResourceGroupOptions !== undefined) ? 
options.clustersListByResourceGroupOptions : undefined; - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - let maxResults; - try { - if (clustersListByResourceGroupOptions !== null && clustersListByResourceGroupOptions !== undefined) - { - maxResults = clustersListByResourceGroupOptions.maxResults; - if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/clusters'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } - if (queryParameters.length > 0) { - requestUrl += '?' + queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) 
{ - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['ClusterListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - - -/** - * Creates a Cluster in the given Workspace. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. 
- * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} parameters The parameters to provide for the Cluster - * creation. - * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). - * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' - * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. 
Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. - * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. - * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. - * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. - * - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. - * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. 
For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. - * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. - * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. - * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. - * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. - * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. - * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. - * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. 
The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. - * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. - * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. - * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. 
One of instumentationKey or - * instrumentationKeySecretReference must be specified. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. - * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. - * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. - * - * @param {string} parameters.subnet.id The ID of the resource - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ -function _create(resourceGroupName, workspaceName, clusterName, parameters, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - - if (!callback) { - throw new Error('callback cannot be null.'); - } - - // Send request - this.beginCreate(resourceGroupName, workspaceName, clusterName, parameters, options, (err, parsedResult, httpRequest, response) => { - if (err) return callback(err); - - let initialResult = new msRest.HttpOperationResponse(); - initialResult.request = httpRequest; - initialResult.response = response; - initialResult.body = response.body; - client.getLongRunningOperationResult(initialResult, options, (err, pollingResult) => { - if (err) return callback(err); - - // Create Result - let result = null; - - httpRequest = pollingResult.request; - response = pollingResult.response; - let responseBody = pollingResult.body; - if (responseBody === '') responseBody = null; - - // Deserialize Response - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['Cluster']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - - return callback(null, result, httpRequest, response); - }); - }); -} /** - * Updates properties of a Cluster. + * Creates a Cluster in the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. 
@@ -613,246 +30,184 @@ function _create(resourceGroupName, workspaceName, clusterName, parameters, opti * characters along with dash (-) and underscore (_). The name must be from 1 * through 64 characters long. * - * @param {object} parameters Additional parameters for cluster update. + * @param {object} parameters The parameters to provide for the Cluster + * creation. + * + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. 
* * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. - * - * @param {object} [options] Optional Parameters. + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. 
* - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * - * @param {function} callback - The callback. + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. * - * @returns {function} callback(err, result, request, response) + * @param {string} + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * - * {Error} err - The Error object if an error occurred, null otherwise. + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. * - * {object} [request] - The HTTP Request object if an error did not occur. + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ -function _update(resourceGroupName, workspaceName, clusterName, parameters, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { - throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); - } - if (workspaceName !== null && workspaceName !== undefined) { - if (workspaceName.length > 64) - { - throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); - } - if (workspaceName.length < 1) - { - throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); - } - if (workspaceName.match(/^[-\w_]+$/) === null) - { - throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { - throw new Error('clusterName cannot be null or undefined and it must be of type string.'); - } - if (clusterName !== null && clusterName !== undefined) { - if (clusterName.length > 64) - { - throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); - } - if (clusterName.length < 1) - { - throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); - } - if 
(clusterName.match(/^[-\w_]+$/) === null) - { - throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (parameters === null || parameters === undefined) { - throw new Error('parameters cannot be null or undefined.'); - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (queryParameters.length > 0) { - requestUrl += '?' 
+ queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'PATCH'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - // Serialize Request - let requestContent = null; - let requestModel = null; - try { - if (parameters !== null && parameters !== undefined) { - let requestModelMapper = new client.models['ClusterUpdateParameters']().mapper(); - requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); - requestContent = JSON.stringify(requestModel); - } - } catch (error) { - let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + - `payload - ${JSON.stringify(parameters, null, 2)}.`); - return callback(serializationError); - } - httpRequest.body = requestContent; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) 
parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['Cluster']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - - -/** - * Deletes a Cluster. + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. 
For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. + * + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. + * + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. 
A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. + * + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. + * + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. + * + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. + * + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. + * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. 
+ * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. + * + * @param {string} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. + * + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. + * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. + * + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. + * + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. + * + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. + * + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. + * + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. 
Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -865,13 +220,14 @@ function _update(resourceGroupName, workspaceName, clusterName, parameters, opti * * {Error} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {object} [result] - The deserialized result object if an error did not occur. + * See {@link Cluster} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _deleteMethod(resourceGroupName, workspaceName, clusterName, options, callback) { +function _create(resourceGroupName, workspaceName, clusterName, parameters, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -884,7 +240,7 @@ function _deleteMethod(resourceGroupName, workspaceName, clusterName, options, c } // Send request - this.beginDeleteMethod(resourceGroupName, workspaceName, clusterName, options, (err, parsedResult, httpRequest, response) => { + this.beginCreate(resourceGroupName, workspaceName, clusterName, parameters, options, (err, parsedResult, httpRequest, response) => { if (err) return callback(err); let initialResult = new msRest.HttpOperationResponse(); @@ -903,183 +259,6 @@ function _deleteMethod(resourceGroupName, workspaceName, clusterName, options, c if (responseBody === '') responseBody = null; // Deserialize Response - - return callback(null, result, httpRequest, response); - }); - }); -} - -/** - * Gets information about a Cluster. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. 
Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ -function _get(resourceGroupName, workspaceName, clusterName, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { - throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); - } - if (workspaceName !== null && workspaceName !== undefined) { - if (workspaceName.length > 64) - { - throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); - } - if (workspaceName.length < 1) - { - throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); - } - if (workspaceName.match(/^[-\w_]+$/) === null) - { - throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { - throw new Error('clusterName cannot be null or undefined and it must be of type string.'); - } - if (clusterName !== null && clusterName !== undefined) { - if (clusterName.length > 64) - { - throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); - } - if (clusterName.length < 1) - { - throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); - } - if 
(clusterName.match(/^[-\w_]+$/) === null) - { - throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (queryParameters.length > 0) { - requestUrl += '?' 
+ queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default 
response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { let parsedResponse = null; try { parsedResponse = JSON.parse(responseBody); @@ -1094,14 +273,14 @@ function _get(resourceGroupName, workspaceName, clusterName, options, callback) deserializationError.response = msRest.stripResponse(response); return callback(deserializationError); } - } - return callback(null, result, httpRequest, response); + return callback(null, result, httpRequest, response); + }); }); } /** - * Get the IP address, port of all the compute nodes in the Cluster. + * Updates properties of a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1117,6 +296,37 @@ function _get(resourceGroupName, workspaceName, clusterName, options, callback) * * @param {object} [options] Optional Parameters. * + * @param {object} [options.scaleSettings] Scale settings. Desired scale + * settings for the cluster. Batch AI service supports manual and auto scale + * clusters. + * + * @param {object} [options.scaleSettings.manual] Manual scale settings. Manual + * scale settings for the cluster. + * + * @param {number} options.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. + * + * @param {string} [options.scaleSettings.manual.nodeDeallocationOption] Node + * deallocation options. An action to be performed when the cluster size is + * decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' + * + * @param {object} [options.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. + * + * @param {number} options.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. 
The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. + * + * @param {number} options.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. + * + * @param {number} [options.scaleSettings.autoScale.initialNodeCount] Initial + * node count. The number of compute nodes to allocate on cluster creation. + * Note that this value is used only during cluster creation. Default: 0. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -1127,14 +337,13 @@ function _get(resourceGroupName, workspaceName, clusterName, options, callback) * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. - * See {@link RemoteLoginInformationListResult} for more - * information. + * See {@link Cluster} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterName, options, callback) { +function _update(resourceGroupName, workspaceName, clusterName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1144,6 +353,7 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa if (!callback) { throw new Error('callback cannot be null.'); } + let scaleSettings = (options && options.scaleSettings !== undefined) ? 
options.scaleSettings : undefined; // Validate try { if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { @@ -1201,10 +411,15 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa } catch (error) { return callback(error); } + let parameters; + if (scaleSettings !== null && scaleSettings !== undefined) { + parameters = new client.models['ClusterUpdateParameters'](); + parameters.scaleSettings = scaleSettings; + } // Construct URL let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}/listRemoteLoginInformation'; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); @@ -1217,7 +432,7 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'POST'; + httpRequest.method = 'PATCH'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1235,7 +450,21 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa } } } - httpRequest.body = null; + // Serialize Request + let requestContent = null; + let requestModel = null; + try { + if (parameters !== null && parameters !== undefined) { + let requestModelMapper = new client.models['ClusterUpdateParameters']().mapper(); + requestModel = 
client.serialize(requestModelMapper, parameters, 'parameters'); + requestContent = JSON.stringify(requestModel); + } + } catch (error) { + let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + + `payload - ${JSON.stringify(parameters, null, 2)}.`); + return callback(serializationError); + } + httpRequest.body = requestContent; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { @@ -1277,7 +506,7 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['RemoteLoginInformationListResult']().mapper(); + let resultMapper = new client.models['Cluster']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -1292,8 +521,9 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa }); } + /** - * Gets information about Clusters associated with the given Workspace. + * Deletes a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1302,14 +532,82 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. + * * @param {object} [options] Optional Parameters. 
* - * @param {object} [options.clustersListByWorkspaceOptions] Additional - * parameters for the operation + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. + * @param {function} callback - The callback. + * + * @returns {function} callback(err, result, request, response) + * + * {Error} err - The Error object if an error occurred, null otherwise. + * + * {null} [result] - The deserialized result object if an error did not occur. + * + * {object} [request] - The HTTP Request object if an error did not occur. + * + * {stream} [response] - The HTTP Response stream if an error did not occur. + */ +function _deleteMethod(resourceGroupName, workspaceName, clusterName, options, callback) { + /* jshint validthis: true */ + let client = this.client; + if(!callback && typeof options === 'function') { + callback = options; + options = null; + } + + if (!callback) { + throw new Error('callback cannot be null.'); + } + + // Send request + this.beginDeleteMethod(resourceGroupName, workspaceName, clusterName, options, (err, parsedResult, httpRequest, response) => { + if (err) return callback(err); + + let initialResult = new msRest.HttpOperationResponse(); + initialResult.request = httpRequest; + initialResult.response = response; + initialResult.body = response.body; + client.getLongRunningOperationResult(initialResult, options, (err, pollingResult) => { + if (err) return callback(err); + + // Create Result + let result = null; + + httpRequest = pollingResult.request; + response = pollingResult.response; + let responseBody = pollingResult.body; + if (responseBody === '') responseBody = null; + + // Deserialize Response + + return callback(null, result, httpRequest, response); + }); + }); +} + +/** + * Gets information about a Cluster. 
+ * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. + * + * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -1321,13 +619,13 @@ function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterNa * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * See {@link Cluster} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { +function _get(resourceGroupName, workspaceName, clusterName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1337,7 +635,6 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { if (!callback) { throw new Error('callback cannot be null.'); } - let clustersListByWorkspaceOptions = (options && options.clustersListByWorkspaceOptions !== undefined) ? 
options.clustersListByWorkspaceOptions : undefined; // Validate try { if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { @@ -1366,6 +663,23 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); } } + if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { + throw new Error('clusterName cannot be null or undefined and it must be of type string.'); + } + if (clusterName !== null && clusterName !== undefined) { + if (clusterName.length > 64) + { + throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); + } + if (clusterName.length < 1) + { + throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); + } + if (clusterName.match(/^[-\w_]+$/) === null) + { + throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); } @@ -1378,30 +692,16 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { } catch (error) { return callback(error); } - let maxResults; - try { - if (clustersListByWorkspaceOptions !== null && clustersListByWorkspaceOptions !== undefined) - { - maxResults = clustersListByWorkspaceOptions.maxResults; - if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } // Construct URL let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters'; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); let queryParameters = []; queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } if (queryParameters.length > 0) { requestUrl += '?' + queryParameters.join('&'); } @@ -1468,7 +768,7 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['ClusterListResult']().mapper(); + let resultMapper = new client.models['Cluster']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -1484,7 +784,7 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { } /** - * Creates a Cluster in the given Workspace. + * Get the IP address, port of all the compute nodes in the Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. 
@@ -1498,180 +798,6 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { * characters along with dash (-) and underscore (_). The name must be from 1 * through 64 characters long. * - * @param {object} parameters The parameters to provide for the Cluster - * creation. - * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). - * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' - * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. 
Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. - * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. - * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. - * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. - * - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. - * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. 
For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. - * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. - * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. - * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. - * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. - * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. - * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. - * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. 
The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. - * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. - * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. - * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. 
One of instumentationKey or - * instrumentationKeySecretReference must be specified. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. - * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. - * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. - * - * @param {string} parameters.subnet.id The ID of the resource - * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1684,13 +810,14 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * See {@link RemoteLoginInformationListResult} for more + * information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ -function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, options, callback) { +function _listRemoteLoginInformation(resourceGroupName, workspaceName, clusterName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1745,9 +872,6 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); } } - if (parameters === null || parameters === undefined) { - throw new Error('parameters cannot be null or undefined.'); - } if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); } @@ -1763,7 +887,7 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, // Construct URL let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}/listRemoteLoginInformation'; requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); @@ -1776,7 +900,7 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'PUT'; + httpRequest.method = 'POST'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1787,35 +911,21 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { httpRequest.headers['accept-language'] = this.client.acceptLanguage; } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - // Serialize Request - let requestContent = null; - let requestModel = null; - try { - if (parameters !== null && parameters !== undefined) { - let requestModelMapper = new client.models['ClusterCreateParameters']().mapper(); - requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); - requestContent = JSON.stringify(requestModel); + if(options) { + for(let headerName in options['customHeaders']) { + if (options['customHeaders'].hasOwnProperty(headerName)) { + httpRequest.headers[headerName] = options['customHeaders'][headerName]; + } } - } catch (error) { - let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + - `payload - ${JSON.stringify(parameters, null, 2)}.`); - return 
callback(serializationError); } - httpRequest.body = requestContent; + httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200 && statusCode !== 202) { + if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -1850,7 +960,7 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['Cluster']().mapper(); + let resultMapper = new client.models['RemoteLoginInformationListResult']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -1866,7 +976,7 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, } /** - * Deletes a Cluster. + * Gets information about Clusters associated with the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1875,13 +985,15 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * * @param {object} [options] Optional Parameters. 
* + * @param {object} [options.clustersListByWorkspaceOptions] Additional + * parameters for the operation + * + * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 files + * can be returned. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -1891,13 +1003,14 @@ function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, * * {Error} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {object} [result] - The deserialized result object if an error did not occur. + * See {@link ClusterListResult} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, options, callback) { +function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1907,6 +1020,7 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio if (!callback) { throw new Error('callback cannot be null.'); } + let clustersListByWorkspaceOptions = (options && options.clustersListByWorkspaceOptions !== undefined) ? 
options.clustersListByWorkspaceOptions : undefined; // Validate try { if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { @@ -1935,23 +1049,6 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); } } - if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { - throw new Error('clusterName cannot be null or undefined and it must be of type string.'); - } - if (clusterName !== null && clusterName !== undefined) { - if (clusterName.length > 64) - { - throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); - } - if (clusterName.length < 1) - { - throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); - } - if (clusterName.match(/^[-\w_]+$/) === null) - { - throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); } @@ -1964,23 +1061,37 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio } catch (error) { return callback(error); } + let maxResults; + try { + if (clustersListByWorkspaceOptions !== null && clustersListByWorkspaceOptions !== undefined) + { + maxResults = clustersListByWorkspaceOptions.maxResults; + if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { + throw new Error('maxResults must be of type number.'); + } + } + } catch (error) { + return callback(error); + } // Construct URL let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters'; requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); let queryParameters = []; queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (maxResults !== null && maxResults !== undefined) { + queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); + } if (queryParameters.length > 0) { requestUrl += '?' 
+ queryParameters.join('&'); } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'DELETE'; + httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -2005,7 +1116,7 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200 && statusCode !== 202 && statusCode !== 204) { + if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -2033,16 +1144,221 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio // Create Result let result = null; if (responseBody === '') responseBody = null; + // Deserialize Response + if (statusCode === 200) { + let parsedResponse = null; + try { + parsedResponse = JSON.parse(responseBody); + result = JSON.parse(responseBody); + if (parsedResponse !== null && parsedResponse !== undefined) { + let resultMapper = new client.models['ClusterListResult']().mapper(); + result = client.deserialize(resultMapper, parsedResponse, 'result'); + } + } catch (error) { + let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); + deserializationError.request = msRest.stripRequest(httpRequest); + deserializationError.response = msRest.stripResponse(response); + return callback(deserializationError); + } + } return callback(null, result, httpRequest, response); }); } /** - * Gets a list of Clusters associated with the given subscription. + * Creates a Cluster in the given Workspace. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. 
Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. + * + * @param {object} parameters The parameters to provide for the Cluster + * creation. + * + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). + * + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' + * + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. + * + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. + * + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. + * + * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. 
Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' + * + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. + * + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. + * + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. + * + * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. + * + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. + * + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. + * + * @param {string} + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. + * + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. + * + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. + * + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. + * + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. 
This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. + * + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. + * + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. + * + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. + * + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. + * + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. + * + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. 
+ * + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. + * + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. + * + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. + * + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. + * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. + * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. + * + * @param {string} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. 
+ * + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. + * + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. + * + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. + * + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. + * + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. + * + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. + * + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -2056,13 +1372,13 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, optio * {Error} err - The Error object if an error occurred, null otherwise. 
* * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * See {@link Cluster} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _listNext(nextPageLink, options, callback) { +function _beginCreate(resourceGroupName, workspaceName, clusterName, parameters, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -2074,8 +1390,57 @@ function _listNext(nextPageLink, options, callback) { } // Validate try { - if (nextPageLink === null || nextPageLink === undefined || typeof nextPageLink.valueOf() !== 'string') { - throw new Error('nextPageLink cannot be null or undefined and it must be of type string.'); + if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { + throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); + } + if (resourceGroupName !== null && resourceGroupName !== undefined) { + if (resourceGroupName.match(/^[-\w\._]+$/) === null) + { + throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); + } + } + if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { + throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); + } + if (workspaceName !== null && workspaceName !== undefined) { + if (workspaceName.length > 64) + { + throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); + } + if (workspaceName.length < 1) + { + throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); + } + if (workspaceName.match(/^[-\w_]+$/) === null) + { + throw new Error('"workspaceName" should satisfy the 
constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { + throw new Error('clusterName cannot be null or undefined and it must be of type string.'); + } + if (clusterName !== null && clusterName !== undefined) { + if (clusterName.length > 64) + { + throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); + } + if (clusterName.length < 1) + { + throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); + } + if (clusterName.match(/^[-\w_]+$/) === null) + { + throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (parameters === null || parameters === undefined) { + throw new Error('parameters cannot be null or undefined.'); + } + if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { + throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); + } + if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { + throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); } if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { throw new Error('this.client.acceptLanguage must be of type string.'); @@ -2085,12 +1450,21 @@ function _listNext(nextPageLink, options, callback) { } // Construct URL - let requestUrl = '{nextLink}'; - requestUrl = requestUrl.replace('{nextLink}', nextPageLink); + let baseUrl = this.client.baseUri; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; + requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); + requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); + requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); + let queryParameters = []; + queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (queryParameters.length > 0) { + requestUrl += '?' + queryParameters.join('&'); + } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'GET'; + httpRequest.method = 'PUT'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -2108,14 +1482,28 @@ function _listNext(nextPageLink, options, callback) { } } } - httpRequest.body = null; + // Serialize Request + let requestContent = null; + let requestModel = null; + try { + if (parameters !== null && parameters !== undefined) { + let requestModelMapper = new client.models['ClusterCreateParameters']().mapper(); + requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); + requestContent = JSON.stringify(requestModel); + } + } catch (error) { + let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + + `payload - ${JSON.stringify(parameters, null, 2)}.`); + return callback(serializationError); + } + httpRequest.body = requestContent; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200) { + if (statusCode !== 200 && statusCode !== 202) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = 
msRest.stripRequest(httpRequest); @@ -2150,7 +1538,7 @@ function _listNext(nextPageLink, options, callback) { parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['ClusterListResult']().mapper(); + let resultMapper = new client.models['Cluster']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -2166,10 +1554,19 @@ function _listNext(nextPageLink, options, callback) { } /** - * Gets a list of Clusters within the specified resource group. + * Deletes a Cluster. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -2182,14 +1579,13 @@ function _listNext(nextPageLink, options, callback) { * * {Error} err - The Error object if an error occurred, null otherwise. * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ -function _listByResourceGroupNext(nextPageLink, options, callback) { +function _beginDeleteMethod(resourceGroupName, workspaceName, clusterName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -2201,8 +1597,54 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { } // Validate try { - if (nextPageLink === null || nextPageLink === undefined || typeof nextPageLink.valueOf() !== 'string') { - throw new Error('nextPageLink cannot be null or undefined and it must be of type string.'); + if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { + throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); + } + if (resourceGroupName !== null && resourceGroupName !== undefined) { + if (resourceGroupName.match(/^[-\w\._]+$/) === null) + { + throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); + } + } + if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { + throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); + } + if (workspaceName !== null && workspaceName !== undefined) { + if (workspaceName.length > 64) + { + throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); + } + if (workspaceName.length < 1) + { + throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); + } + if (workspaceName.match(/^[-\w_]+$/) === null) + { + throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (clusterName === null || clusterName === undefined || typeof clusterName.valueOf() !== 'string') { + throw new Error('clusterName cannot be null or undefined and it must be of type string.'); + } + if (clusterName !== null && clusterName !== undefined) { + if 
(clusterName.length > 64) + { + throw new Error('"clusterName" should satisfy the constraint - "MaxLength": 64'); + } + if (clusterName.length < 1) + { + throw new Error('"clusterName" should satisfy the constraint - "MinLength": 1'); + } + if (clusterName.match(/^[-\w_]+$/) === null) + { + throw new Error('"clusterName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { + throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); + } + if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { + throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); } if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { throw new Error('this.client.acceptLanguage must be of type string.'); @@ -2212,12 +1654,21 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { } // Construct URL - let requestUrl = '{nextLink}'; - requestUrl = requestUrl.replace('{nextLink}', nextPageLink); + let baseUrl = this.client.baseUri; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/clusters/{clusterName}'; + requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); + requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{clusterName}', encodeURIComponent(clusterName)); + requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); + let queryParameters = []; + queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (queryParameters.length > 0) { + requestUrl += '?' + queryParameters.join('&'); + } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'GET'; + httpRequest.method = 'DELETE'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -2242,7 +1693,7 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200) { + if (statusCode !== 200 && statusCode !== 202 && statusCode !== 204) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -2270,23 +1721,6 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { // Create Result let result = null; if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['ClusterListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - 
${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } return callback(null, result, httpRequest, response); }); @@ -2543,220 +1977,28 @@ function _listByWorkspaceNext(nextPageLink, options, callback) { } } - return callback(null, result, httpRequest, response); - }); -} - -/** Class representing a Clusters. */ -class Clusters { - /** - * Create a Clusters. - * @param {BatchAIManagementClient} client Reference to the service client. - */ - constructor(client) { - this.client = client; - this._list = _list; - this._listByResourceGroup = _listByResourceGroup; - this._create = _create; - this._update = _update; - this._deleteMethod = _deleteMethod; - this._get = _get; - this._listRemoteLoginInformation = _listRemoteLoginInformation; - this._listByWorkspace = _listByWorkspace; - this._beginCreate = _beginCreate; - this._beginDeleteMethod = _beginDeleteMethod; - this._listNext = _listNext; - this._listByResourceGroupNext = _listByResourceGroupNext; - this._listRemoteLoginInformationNext = _listRemoteLoginInformationNext; - this._listByWorkspaceNext = _listByWorkspaceNext; - } - - /** - * Gets a list of Clusters associated with the given subscription. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListOptions] Additional parameters for the - * operation - * - * @param {number} [options.clustersListOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. 
- */ - listWithHttpOperationResponse(options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._list(options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of Clusters associated with the given subscription. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListOptions] Additional parameters for the - * operation - * - * @param {number} [options.clustersListOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {ClusterListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ - list(options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._list(options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._list(options, optionalCallback); - } - } - - /** - * Gets a list of Clusters within the specified resource group. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.clustersListByResourceGroupOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listByResourceGroupWithHttpOperationResponse(resourceGroupName, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listByResourceGroup(resourceGroupName, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - + return callback(null, result, httpRequest, response); + }); +} + +/** Class representing a Clusters. */ +class Clusters { /** - * Gets a list of Clusters within the specified resource group. 
- * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.clustersListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.clustersListByResourceGroupOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {ClusterListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. + * Create a Clusters. + * @param {BatchAIManagementClient} client Reference to the service client. 
*/ - listByResourceGroup(resourceGroupName, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listByResourceGroup(resourceGroupName, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listByResourceGroup(resourceGroupName, options, optionalCallback); - } + constructor(client) { + this.client = client; + this._create = _create; + this._update = _update; + this._deleteMethod = _deleteMethod; + this._get = _get; + this._listRemoteLoginInformation = _listRemoteLoginInformation; + this._listByWorkspace = _listByWorkspace; + this._beginCreate = _beginCreate; + this._beginDeleteMethod = _beginDeleteMethod; + this._listRemoteLoginInformationNext = _listRemoteLoginInformationNext; + this._listByWorkspaceNext = _listByWorkspaceNext; } /** @@ -2777,75 +2019,74 @@ class Clusters { * @param {object} parameters The parameters to provide for the Cluster * creation. * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). 
Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. 
Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * Image reference. OS image reference for cluster nodes. 
* * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * Offer. Offer of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * SKU. SKU of the image. * * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * * @param {string} * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node * agent to communicate with the Batch service see @@ -2853,98 +2094,104 @@ class Clusters { * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. 
* - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to * download/install software. * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated * or rebooted. The command is executed in a bash subshell as a root. * * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. 
* - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. 
* * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * * @param {object} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. + * Component ID. Azure Application Insights component resource ID. * * @param {string} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * * @param {object} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. 
Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * * @param {object} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * * @param {string} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. * * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. 
+ * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * * @param {string} parameters.subnet.id The ID of the resource * @@ -2991,75 +2238,74 @@ class Clusters { * @param {object} parameters The parameters to provide for the Cluster * creation. * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. 
Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. 
Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * Image reference. OS image reference for cluster nodes. * * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * Offer. Offer of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * SKU. SKU of the image. * * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. 
* * @param {string} * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node * agent to communicate with the Batch service see @@ -3067,98 +2313,104 @@ class Clusters { * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to * download/install software. 
* - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated * or rebooted. The command is executed in a bash subshell as a root. * * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. 
Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. 
If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * * @param {object} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. + * Component ID. Azure Application Insights component resource ID. * * @param {string} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * * @param {object} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * * @param {object} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. 
* * @param {string} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. * * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * * @param {string} parameters.subnet.id The ID of the resource * @@ -3224,39 +2476,38 @@ class Clusters { * characters along with dash (-) and underscore (_). The name must be from 1 * through 64 characters long. * - * @param {object} parameters Additional parameters for cluster update. 
- * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. + * @param {object} [options] Optional Parameters. * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster + * @param {object} [options.scaleSettings] Scale settings. Desired scale + * settings for the cluster. Batch AI service supports manual and auto scale + * clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [options.scaleSettings.manual] Manual scale settings. Manual + * scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} options.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * @param {string} [options.scaleSettings.manual.nodeDeallocationOption] Node + * deallocation options. An action to be performed when the cluster size is + * decreasing. The default value is requeue. Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {object} [options.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. 
* - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} options.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {number} options.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * - * @param {object} [options] Optional Parameters. + * @param {number} [options.scaleSettings.autoScale.initialNodeCount] Initial + * node count. The number of compute nodes to allocate on cluster creation. + * Note that this value is used only during cluster creation. Default: 0. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -3267,11 +2518,11 @@ class Clusters { * * @reject {Error} - The error object. 
*/ - updateWithHttpOperationResponse(resourceGroupName, workspaceName, clusterName, parameters, options) { + updateWithHttpOperationResponse(resourceGroupName, workspaceName, clusterName, options) { let client = this.client; let self = this; return new Promise((resolve, reject) => { - self._update(resourceGroupName, workspaceName, clusterName, parameters, options, (err, result, request, response) => { + self._update(resourceGroupName, workspaceName, clusterName, options, (err, result, request, response) => { let httpOperationResponse = new msRest.HttpOperationResponse(request, response); httpOperationResponse.body = result; if (err) { reject(err); } @@ -3296,39 +2547,38 @@ class Clusters { * characters along with dash (-) and underscore (_). The name must be from 1 * through 64 characters long. * - * @param {object} parameters Additional parameters for cluster update. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. + * @param {object} [options] Optional Parameters. * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster + * @param {object} [options.scaleSettings] Scale settings. Desired scale + * settings for the cluster. Batch AI service supports manual and auto scale + * clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [options.scaleSettings.manual] Manual scale settings. Manual + * scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} options.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. 
* - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * @param {string} [options.scaleSettings.manual.nodeDeallocationOption] Node + * deallocation options. An action to be performed when the cluster size is + * decreasing. The default value is requeue. Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {object} [options.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} options.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {number} options.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * - * @param {object} [options] Optional Parameters. + * @param {number} [options.scaleSettings.autoScale.initialNodeCount] Initial + * node count. The number of compute nodes to allocate on cluster creation. 
+ * Note that this value is used only during cluster creation. Default: 0. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -3355,7 +2605,7 @@ class Clusters { * * {stream} [response] - The HTTP Response stream if an error did not occur. */ - update(resourceGroupName, workspaceName, clusterName, parameters, options, optionalCallback) { + update(resourceGroupName, workspaceName, clusterName, options, optionalCallback) { let client = this.client; let self = this; if (!optionalCallback && typeof options === 'function') { @@ -3364,14 +2614,14 @@ class Clusters { } if (!optionalCallback) { return new Promise((resolve, reject) => { - self._update(resourceGroupName, workspaceName, clusterName, parameters, options, (err, result, request, response) => { + self._update(resourceGroupName, workspaceName, clusterName, options, (err, result, request, response) => { if (err) { reject(err); } else { resolve(result); } return; }); }); } else { - return self._update(resourceGroupName, workspaceName, clusterName, parameters, options, optionalCallback); + return self._update(resourceGroupName, workspaceName, clusterName, options, optionalCallback); } } @@ -3805,75 +3055,74 @@ class Clusters { * @param {object} parameters The parameters to provide for the Cluster * creation. * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). 
Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. 
Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * Image reference. OS image reference for cluster nodes. 
* * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * Offer. Offer of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * SKU. SKU of the image. * * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * * @param {string} * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node * agent to communicate with the Batch service see @@ -3881,98 +3130,104 @@ class Clusters { * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. 
* - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to * download/install software. * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated * or rebooted. The command is executed in a bash subshell as a root. * * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. 
* - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. 
* * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * * @param {object} * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. + * Component ID. Azure Application Insights component resource ID. * * @param {string} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * * @param {object} * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. 
Specifies KeyVault Store and Secret which contains
- * Azure Application Insights instrumentation key. One of instumentationKey or
- * instrumentationKeySecretReference must be specified.
+ * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret
+ * which contains Azure Application Insights instrumentation key. One of
+ * instrumentationKey or instrumentationKeySecretReference must be specified.
  *
  * @param {object}
  * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault
- * Fully qualified resource Id for the Key Vault.
+ * Key Vault resource identifier. Fully qualified resource identifier of the
+ * Key Vault.
  *
  * @param {string}
  * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl
- * The URL referencing a secret in a Key Vault.
+ * Secret URL. The URL referencing a secret in the Key Vault.
  *
- * @param {object} parameters.userAccountSettings Settings for user account
- * that will be created on all compute nodes of the cluster.
+ * @param {object} parameters.userAccountSettings User account settings.
+ * Settings for an administrator user account that will be created on each
+ * compute node in the cluster.
  *
- * @param {string} parameters.userAccountSettings.adminUserName Specifies the
- * name of the administrator account.
+ * @param {string} parameters.userAccountSettings.adminUserName User name. Name
+ * of the administrator user account which can be used to SSH to nodes.
  *
  * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH
- * public keys used to authenticate with linux based VMs. This does not get
- * returned in a GET response body.
+ * public key. SSH public key of the administrator user account.
  *
- * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin
- * user Password (linux only). This does not get returned in a GET response
- * body. 
+ * @param {string} [parameters.userAccountSettings.adminUserPassword] Password.
+ * Password of the administrator user account.
  *
- * @param {object} [parameters.subnet] Specifies the identifier of the subnet.
+ * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet
+ * to put the cluster nodes in. Note, if a File Server mount is configured in node
+ * setup, the File Server's subnet will be used automatically.
  *
  * @param {string} parameters.subnet.id The ID of the resource
  *
@@ -4019,75 +3274,74 @@ class Clusters {
  * @param {object} parameters The parameters to provide for the Cluster
  * creation.
  *
- * @param {string} parameters.location The region in which to create the
- * cluster.
- *
- * @param {object} [parameters.tags] The user specified tags associated with
- * the Cluster.
- *
- * @param {string} parameters.vmSize The size of the virtual machines in the
- * cluster. All virtual machines in a cluster are the same size. For
+ * @param {string} parameters.vmSize VM size. The size of the virtual machines
+ * in the cluster. All nodes in a cluster have the same VM size. For
  * information about available VM sizes for clusters using images from the
- * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or
- * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM
- * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS,
- * STANDARD_DS, and STANDARD_DSV2 series).
+ * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch
+ * AI service supports all Azure VM sizes except STANDARD_A0 and those with
+ * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series).
  *
- * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is
- * dedicated. Possible values include: 'dedicated', 'lowpriority'
+ * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed
+ * values are: dedicated (default) and lowpriority. 
Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. 
Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * Image reference. OS image reference for cluster nodes. * * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * Offer. Offer of the image. * * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * SKU. SKU of the image. * * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. 
* * @param {string} * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. * The virtual machine image must be in the same region and subscription as the * cluster. For information about the firewall settings for the Batch node * agent to communicate with the Batch service see @@ -4095,98 +3349,104 @@ class Clusters { * Note, you need to provide publisher, offer and sku of the base OS image of * which the custom image has been derived from. * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to * download/install software. 
* - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated * or rebooted. The command is executed in a bash subshell as a root. * * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. 
Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. 
If provided, Batch AI will upload node performance counters to
- * the corresponding Azure Application Insights account.
+ * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure
+ * Application Insights reference. Azure Application Insights information for
+ * performance counters reporting. If provided, Batch AI will upload node
+ * performance counters to the corresponding Azure Application Insights
+ * account.
  *
  * @param {object}
  * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component
- * Specifies the Azure Application Insights component resource id.
+ * Component ID. Azure Application Insights component resource ID.
  *
  * @param {string}
  * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey]
- * Value of the Azure Application Insights instrumentation key.
+ * Instrumentation Key. Value of the Azure Application Insights instrumentation
+ * key.
  *
  * @param {object}
  * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference]
- * Specifies a KeyVault Secret containing Azure Application Insights
- * instrumentation key. Specifies KeyVault Store and Secret which contains
- * Azure Application Insights instrumentation key. One of instumentationKey or
- * instrumentationKeySecretReference must be specified.
+ * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret
+ * which contains Azure Application Insights instrumentation key. One of
+ * instrumentationKey or instrumentationKeySecretReference must be specified.
  *
  * @param {object}
  * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault
- * Fully qualified resource Id for the Key Vault.
+ * Key Vault resource identifier. Fully qualified resource identifier of the
+ * Key Vault. 
 *
 * @param {string}
 * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl
- * The URL referencing a secret in a Key Vault.
+ * Secret URL. The URL referencing a secret in the Key Vault.
  *
- * @param {object} parameters.userAccountSettings Settings for user account
- * that will be created on all compute nodes of the cluster.
+ * @param {object} parameters.userAccountSettings User account settings.
+ * Settings for an administrator user account that will be created on each
+ * compute node in the cluster.
  *
- * @param {string} parameters.userAccountSettings.adminUserName Specifies the
- * name of the administrator account.
+ * @param {string} parameters.userAccountSettings.adminUserName User name. Name
+ * of the administrator user account which can be used to SSH to nodes.
  *
  * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH
- * public keys used to authenticate with linux based VMs. This does not get
- * returned in a GET response body.
+ * public key. SSH public key of the administrator user account.
  *
- * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin
- * user Password (linux only). This does not get returned in a GET response
- * body.
+ * @param {string} [parameters.userAccountSettings.adminUserPassword] Password.
+ * Password of the administrator user account.
  *
- * @param {object} [parameters.subnet] Specifies the identifier of the subnet.
+ * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet
+ * to put the cluster nodes in. Note, if a File Server mount is configured in node
+ * setup, the File Server's subnet will be used automatically.
  *
  * @param {string} parameters.subnet.id The ID of the resource
  *
@@ -4338,174 +3598,6 @@ class Clusters {
     }
   }
 
-  /**
-   * Gets a list of Clusters associated with the given subscription.
-   *
-   * @param {string} nextPageLink The NextLink from the previous successful call
-   * to List operation.
- * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listNextWithHttpOperationResponse(nextPageLink, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listNext(nextPageLink, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of Clusters associated with the given subscription. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {ClusterListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ - listNext(nextPageLink, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listNext(nextPageLink, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listNext(nextPageLink, options, optionalCallback); - } - } - - /** - * Gets a list of Clusters within the specified resource group. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listByResourceGroupNextWithHttpOperationResponse(nextPageLink, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listByResourceGroupNext(nextPageLink, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of Clusters within the specified resource group. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. 
- * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {ClusterListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ - listByResourceGroupNext(nextPageLink, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listByResourceGroupNext(nextPageLink, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listByResourceGroupNext(nextPageLink, options, optionalCallback); - } - } - /** * Get the IP address, port of all the compute nodes in the Cluster. * diff --git a/lib/services/batchaiManagement/lib/operations/fileServers.js b/lib/services/batchaiManagement/lib/operations/fileServers.js index 88602a1f94..3e83f6593b 100644 --- a/lib/services/batchaiManagement/lib/operations/fileServers.js +++ b/lib/services/batchaiManagement/lib/operations/fileServers.js @@ -14,176 +14,81 @@ const msRest = require('ms-rest'); const msRestAzure = require('ms-rest-azure'); const WebResource = msRest.WebResource; + /** - * Gets a list of File Servers associated with the given subscription. + * Creates a File Server in the given workspace. 
* - * @param {object} [options] Optional Parameters. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {object} [options.fileServersListOptions] Additional parameters for - * the operation + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {number} [options.fileServersListOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} parameters The parameters to provide for File Server + * creation. * - * @param {function} callback - The callback. + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). * - * @returns {function} callback(err, result, request, response) + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. * - * {Error} err - The Error object if an error occurred, null otherwise. + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. * - * {object} [result] - The deserialized result object if an error did not occur. 
- * See {@link FileServerListResult} for more information. + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. * - * {object} [request] - The HTTP Request object if an error did not occur. + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ -function _list(options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - let fileServersListOptions = (options && options.fileServersListOptions !== undefined) ? options.fileServersListOptions : undefined; - // Validate - try { - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - let maxResults; - try { - if (fileServersListOptions !== null && fileServersListOptions !== undefined) - { - maxResults = fileServersListOptions.maxResults; - if 
(maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/providers/Microsoft.BatchAI/fileServers'; - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } - if (queryParameters.length > 0) { - requestUrl += '?' + queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = 
msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServerListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - -/** - * Gets a list of File Servers within the specified resource group. + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. 
SSH public key of the administrator user account. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. * - * @param {object} [options] Optional Parameters. + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. * - * @param {object} [options.fileServersListByResourceGroupOptions] Additional - * parameters for the operation + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. * - * @param {number} [options.fileServersListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. 
+ * @param {string} parameters.subnet.id The ID of the resource + * + * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -195,345 +100,27 @@ function _list(options, callback) { * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * See {@link FileServer} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _listByResourceGroup(resourceGroupName, options, callback) { +function _create(resourceGroupName, workspaceName, fileServerName, parameters, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { callback = options; options = null; } + if (!callback) { throw new Error('callback cannot be null.'); } - let fileServersListByResourceGroupOptions = (options && options.fileServersListByResourceGroupOptions !== undefined) ? 
options.fileServersListByResourceGroupOptions : undefined; - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - let maxResults; - try { - if (fileServersListByResourceGroupOptions !== null && fileServersListByResourceGroupOptions !== undefined) - { - maxResults = fileServersListByResourceGroupOptions.maxResults; - if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/fileServers'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } - if (queryParameters.length > 0) { - requestUrl += '?' + queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if 
(parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServerListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - - -/** - * Creates a File Server in the given workspace. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. 
- * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * - * @param {object} parameters The parameters to provide for File Server - * creation. - * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. - * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. - * - * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. 
- * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. - * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' - * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. - * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. - * - * @param {string} parameters.subnet.id The ID of the resource - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServer} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ -function _create(resourceGroupName, workspaceName, fileServerName, parameters, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - - if (!callback) { - throw new Error('callback cannot be null.'); - } - - // Send request - this.beginCreate(resourceGroupName, workspaceName, fileServerName, parameters, options, (err, parsedResult, httpRequest, response) => { - if (err) return callback(err); - - let initialResult = new msRest.HttpOperationResponse(); - initialResult.request = httpRequest; - initialResult.response = response; - initialResult.body = response.body; - client.getLongRunningOperationResult(initialResult, options, (err, pollingResult) => { - if (err) return callback(err); - - // Create Result - let result = null; - - httpRequest = pollingResult.request; - response = pollingResult.response; - let responseBody = pollingResult.body; - if (responseBody === '') responseBody = null; - - // Deserialize Response - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServer']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - - return callback(null, result, httpRequest, response); - }); - }); -} - - -/** - * Deletes a File Server. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. 
Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {null} [result] - The deserialized result object if an error did not occur. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ -function _deleteMethod(resourceGroupName, workspaceName, fileServerName, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - - if (!callback) { - throw new Error('callback cannot be null.'); - } - - // Send request - this.beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, options, (err, parsedResult, httpRequest, response) => { - if (err) return callback(err); + // Send request + this.beginCreate(resourceGroupName, workspaceName, fileServerName, parameters, options, (err, parsedResult, httpRequest, response) => { + if (err) return callback(err); let initialResult = new msRest.HttpOperationResponse(); initialResult.request = httpRequest; @@ -546,385 +133,17 @@ function _deleteMethod(resourceGroupName, workspaceName, fileServerName, options let result = null; httpRequest = pollingResult.request; - response = pollingResult.response; - let responseBody = pollingResult.body; - if (responseBody === '') responseBody = null; - - // Deserialize Response - - return callback(null, result, httpRequest, response); - }); - }); -} - -/** - * Gets information about a File Server. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * - * @param {object} [options] Optional Parameters. 
- * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServer} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ -function _get(resourceGroupName, workspaceName, fileServerName, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { - throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); - } - if (workspaceName !== null && workspaceName !== undefined) { - if (workspaceName.length > 64) - { - throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); - } - if (workspaceName.length < 1) - { - throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); - } - if (workspaceName.match(/^[-\w_]+$/) === null) - { - throw new Error('"workspaceName" 
should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (fileServerName === null || fileServerName === undefined || typeof fileServerName.valueOf() !== 'string') { - throw new Error('fileServerName cannot be null or undefined and it must be of type string.'); - } - if (fileServerName !== null && fileServerName !== undefined) { - if (fileServerName.length > 64) - { - throw new Error('"fileServerName" should satisfy the constraint - "MaxLength": 64'); - } - if (fileServerName.length < 1) - { - throw new Error('"fileServerName" should satisfy the constraint - "MinLength": 1'); - } - if (fileServerName.match(/^[-\w_]+$/) === null) - { - throw new Error('"fileServerName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{fileServerName}', encodeURIComponent(fileServerName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (queryParameters.length > 0) { - requestUrl += '?' + queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - 
try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServer']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } - - return callback(null, result, httpRequest, response); - }); -} - -/** - * Gets a list of File Servers associated with the specified workspace. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. 
Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.fileServersListByWorkspaceOptions] Additional - * parameters for the operation - * - * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} callback - The callback. - * - * @returns {function} callback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ -function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { - /* jshint validthis: true */ - let client = this.client; - if(!callback && typeof options === 'function') { - callback = options; - options = null; - } - if (!callback) { - throw new Error('callback cannot be null.'); - } - let fileServersListByWorkspaceOptions = (options && options.fileServersListByWorkspaceOptions !== undefined) ? 
options.fileServersListByWorkspaceOptions : undefined; - // Validate - try { - if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { - throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); - } - if (resourceGroupName !== null && resourceGroupName !== undefined) { - if (resourceGroupName.match(/^[-\w\._]+$/) === null) - { - throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); - } - } - if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { - throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); - } - if (workspaceName !== null && workspaceName !== undefined) { - if (workspaceName.length > 64) - { - throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); - } - if (workspaceName.length < 1) - { - throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); - } - if (workspaceName.match(/^[-\w_]+$/) === null) - { - throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } - if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { - throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); - } - if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { - throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); - } - if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { - throw new Error('this.client.acceptLanguage must be of type string.'); - } - } catch (error) { - return callback(error); - } 
- let maxResults; - try { - if (fileServersListByWorkspaceOptions !== null && fileServersListByWorkspaceOptions !== undefined) - { - maxResults = fileServersListByWorkspaceOptions.maxResults; - if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { - throw new Error('maxResults must be of type number.'); - } - } - } catch (error) { - return callback(error); - } - - // Construct URL - let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers'; - requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); - requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); - let queryParameters = []; - queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); - if (maxResults !== null && maxResults !== undefined) { - queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); - } - if (queryParameters.length > 0) { - requestUrl += '?' 
+ queryParameters.join('&'); - } - - // Create HTTP transport objects - let httpRequest = new WebResource(); - httpRequest.method = 'GET'; - httpRequest.url = requestUrl; - httpRequest.headers = {}; - // Set Headers - httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; - if (this.client.generateClientRequestId) { - httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); - } - if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { - httpRequest.headers['accept-language'] = this.client.acceptLanguage; - } - if(options) { - for(let headerName in options['customHeaders']) { - if (options['customHeaders'].hasOwnProperty(headerName)) { - httpRequest.headers[headerName] = options['customHeaders'][headerName]; - } - } - } - httpRequest.body = null; - // Send Request - return client.pipeline(httpRequest, (err, response, responseBody) => { - if (err) { - return callback(err); - } - let statusCode = response.statusCode; - if (statusCode !== 200) { - let error = new Error(responseBody); - error.statusCode = response.statusCode; - error.request = msRest.stripRequest(httpRequest); - error.response = msRest.stripResponse(response); - if (responseBody === '') responseBody = null; - let parsedErrorResponse; - try { - parsedErrorResponse = JSON.parse(responseBody); - if (parsedErrorResponse) { - if (parsedErrorResponse.error) parsedErrorResponse = parsedErrorResponse.error; - if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; - if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; - } - if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { - let resultMapper = new client.models['CloudError']().mapper(); - error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); - } - } catch (defaultError) { - error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + - `- "${responseBody}" for the default 
response.`; - return callback(error); - } - return callback(error); - } - // Create Result - let result = null; - if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { + response = pollingResult.response; + let responseBody = pollingResult.body; + if (responseBody === '') responseBody = null; + + // Deserialize Response let parsedResponse = null; try { parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServerListResult']().mapper(); + let resultMapper = new client.models['FileServer']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -933,14 +152,15 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { deserializationError.response = msRest.stripResponse(response); return callback(deserializationError); } - } - return callback(null, result, httpRequest, response); + return callback(null, result, httpRequest, response); + }); }); } + /** - * Creates a File Server in the given workspace. + * Deletes a File Server. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -954,66 +174,75 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { * of alphanumeric characters along with dash (-) and underscore (_). The name * must be from 1 through 64 characters long. * - * @param {object} parameters The parameters to provide for File Server - * creation. - * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. - * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. 
For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} [options] Optional Parameters. * - * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * @param {function} callback - The callback. * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * @returns {function} callback(err, result, request, response) * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * {Error} err - The Error object if an error occurred, null otherwise. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * {null} [result] - The deserialized result object if an error did not occur. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. 
This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * {object} [request] - The HTTP Request object if an error did not occur. * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * {stream} [response] - The HTTP Response stream if an error did not occur. + */ +function _deleteMethod(resourceGroupName, workspaceName, fileServerName, options, callback) { + /* jshint validthis: true */ + let client = this.client; + if(!callback && typeof options === 'function') { + callback = options; + options = null; + } + + if (!callback) { + throw new Error('callback cannot be null.'); + } + + // Send request + this.beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, options, (err, parsedResult, httpRequest, response) => { + if (err) return callback(err); + + let initialResult = new msRest.HttpOperationResponse(); + initialResult.request = httpRequest; + initialResult.response = response; + initialResult.body = response.body; + client.getLongRunningOperationResult(initialResult, options, (err, pollingResult) => { + if (err) return callback(err); + + // Create Result + let result = null; + + httpRequest = pollingResult.request; + response = pollingResult.response; + let responseBody = pollingResult.body; + if (responseBody === '') responseBody = null; + + // Deserialize Response + + return callback(null, result, httpRequest, response); + }); + }); +} + +/** + * Gets information about a File Server. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. 
+ * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1033,7 +262,7 @@ function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _beginCreate(resourceGroupName, workspaceName, fileServerName, parameters, options, callback) { +function _get(resourceGroupName, workspaceName, fileServerName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1088,9 +317,6 @@ function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete throw new Error('"fileServerName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); } } - if (parameters === null || parameters === undefined) { - throw new Error('parameters cannot be null or undefined.'); - } if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); } @@ -1119,7 +345,7 @@ function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'PUT'; + httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1137,28 +363,14 @@ 
function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete } } } - // Serialize Request - let requestContent = null; - let requestModel = null; - try { - if (parameters !== null && parameters !== undefined) { - let requestModelMapper = new client.models['FileServerCreateParameters']().mapper(); - requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); - requestContent = JSON.stringify(requestModel); - } - } catch (error) { - let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + - `payload - ${JSON.stringify(parameters, null, 2)}.`); - return callback(serializationError); - } - httpRequest.body = requestContent; + httpRequest.body = null; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200 && statusCode !== 202) { + if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -1209,7 +421,7 @@ function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete } /** - * Deletes a File Server. + * Gets a list of File Servers associated with the specified workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1218,13 +430,15 @@ function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * * @param {object} [options] Optional Parameters. 
* + * @param {object} [options.fileServersListByWorkspaceOptions] Additional + * parameters for the operation + * + * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 files + * can be returned. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -1234,13 +448,14 @@ function _beginCreate(resourceGroupName, workspaceName, fileServerName, paramete * * {Error} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {object} [result] - The deserialized result object if an error did not occur. + * See {@link FileServerListResult} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. */ -function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, options, callback) { +function _listByWorkspace(resourceGroupName, workspaceName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1250,6 +465,7 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op if (!callback) { throw new Error('callback cannot be null.'); } + let fileServersListByWorkspaceOptions = (options && options.fileServersListByWorkspaceOptions !== undefined) ? 
options.fileServersListByWorkspaceOptions : undefined; // Validate try { if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { @@ -1278,23 +494,6 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); } } - if (fileServerName === null || fileServerName === undefined || typeof fileServerName.valueOf() !== 'string') { - throw new Error('fileServerName cannot be null or undefined and it must be of type string.'); - } - if (fileServerName !== null && fileServerName !== undefined) { - if (fileServerName.length > 64) - { - throw new Error('"fileServerName" should satisfy the constraint - "MaxLength": 64'); - } - if (fileServerName.length < 1) - { - throw new Error('"fileServerName" should satisfy the constraint - "MinLength": 1'); - } - if (fileServerName.match(/^[-\w_]+$/) === null) - { - throw new Error('"fileServerName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); - } - } if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); } @@ -1307,23 +506,37 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op } catch (error) { return callback(error); } + let maxResults; + try { + if (fileServersListByWorkspaceOptions !== null && fileServersListByWorkspaceOptions !== undefined) + { + maxResults = fileServersListByWorkspaceOptions.maxResults; + if (maxResults !== null && maxResults !== undefined && typeof maxResults !== 'number') { + throw new Error('maxResults must be of type number.'); + } + } + } catch (error) { + return callback(error); + } // Construct URL let baseUrl = this.client.baseUri; - let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers'; requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); - requestUrl = requestUrl.replace('{fileServerName}', encodeURIComponent(fileServerName)); requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); let queryParameters = []; queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (maxResults !== null && maxResults !== undefined) { + queryParameters.push('maxresults=' + encodeURIComponent(maxResults.toString())); + } if (queryParameters.length > 0) { requestUrl += '?' 
+ queryParameters.join('&'); } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'DELETE'; + httpRequest.method = 'GET'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1348,7 +561,7 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200 && statusCode !== 202 && statusCode !== 204) { + if (statusCode !== 200) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -1376,16 +589,100 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op // Create Result let result = null; if (responseBody === '') responseBody = null; + // Deserialize Response + if (statusCode === 200) { + let parsedResponse = null; + try { + parsedResponse = JSON.parse(responseBody); + result = JSON.parse(responseBody); + if (parsedResponse !== null && parsedResponse !== undefined) { + let resultMapper = new client.models['FileServerListResult']().mapper(); + result = client.deserialize(resultMapper, parsedResponse, 'result'); + } + } catch (error) { + let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); + deserializationError.request = msRest.stripRequest(httpRequest); + deserializationError.response = msRest.stripResponse(response); + return callback(deserializationError); + } + } return callback(null, result, httpRequest, response); }); } /** - * Gets a list of File Servers associated with the given subscription. + * Creates a File Server in the given workspace. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. 
Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. + * + * @param {object} parameters The parameters to provide for File Server + * creation. + * + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. + * + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. + * + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. + * + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. + * + * @param {object} parameters.dataDisks Data disks. 
Settings for the data disks + * which will be created for the File Server. + * + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -1399,13 +696,13 @@ function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, op * {Error} err - The Error object if an error occurred, null otherwise. * * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * See {@link FileServer} for more information. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ -function _listNext(nextPageLink, options, callback) { +function _beginCreate(resourceGroupName, workspaceName, fileServerName, parameters, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1417,8 +714,57 @@ function _listNext(nextPageLink, options, callback) { } // Validate try { - if (nextPageLink === null || nextPageLink === undefined || typeof nextPageLink.valueOf() !== 'string') { - throw new Error('nextPageLink cannot be null or undefined and it must be of type string.'); + if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { + throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); + } + if (resourceGroupName !== null && resourceGroupName !== undefined) { + if (resourceGroupName.match(/^[-\w\._]+$/) === null) + { + throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); + } + } + if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { + throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); + } + if (workspaceName !== null && workspaceName !== undefined) { + if (workspaceName.length > 64) + { + throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); + } + if (workspaceName.length < 1) + { + throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); + } + if (workspaceName.match(/^[-\w_]+$/) === null) + { + throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (fileServerName === null || fileServerName === undefined || typeof fileServerName.valueOf() !== 'string') { + throw new Error('fileServerName cannot be null or undefined and it must be of type string.'); + } + if (fileServerName !== null && fileServerName !== undefined) { + if 
(fileServerName.length > 64) + { + throw new Error('"fileServerName" should satisfy the constraint - "MaxLength": 64'); + } + if (fileServerName.length < 1) + { + throw new Error('"fileServerName" should satisfy the constraint - "MinLength": 1'); + } + if (fileServerName.match(/^[-\w_]+$/) === null) + { + throw new Error('"fileServerName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (parameters === null || parameters === undefined) { + throw new Error('parameters cannot be null or undefined.'); + } + if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { + throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); + } + if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { + throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); } if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { throw new Error('this.client.acceptLanguage must be of type string.'); @@ -1428,12 +774,21 @@ function _listNext(nextPageLink, options, callback) { } // Construct URL - let requestUrl = '{nextLink}'; - requestUrl = requestUrl.replace('{nextLink}', nextPageLink); + let baseUrl = this.client.baseUri; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'; + requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); + requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{fileServerName}', encodeURIComponent(fileServerName)); + requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); + let queryParameters = []; + queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (queryParameters.length > 0) { + requestUrl += '?' + queryParameters.join('&'); + } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'GET'; + httpRequest.method = 'PUT'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1451,14 +806,28 @@ function _listNext(nextPageLink, options, callback) { } } } - httpRequest.body = null; + // Serialize Request + let requestContent = null; + let requestModel = null; + try { + if (parameters !== null && parameters !== undefined) { + let requestModelMapper = new client.models['FileServerCreateParameters']().mapper(); + requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); + requestContent = JSON.stringify(requestModel); + } + } catch (error) { + let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + + `payload - ${JSON.stringify(parameters, null, 2)}.`); + return callback(serializationError); + } + httpRequest.body = requestContent; // Send Request return client.pipeline(httpRequest, (err, response, responseBody) => { if (err) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200) { + if (statusCode !== 200 && statusCode !== 202) { let error = new Error(responseBody); error.statusCode = response.statusCode; 
error.request = msRest.stripRequest(httpRequest); @@ -1493,7 +862,7 @@ function _listNext(nextPageLink, options, callback) { parsedResponse = JSON.parse(responseBody); result = JSON.parse(responseBody); if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServerListResult']().mapper(); + let resultMapper = new client.models['FileServer']().mapper(); result = client.deserialize(resultMapper, parsedResponse, 'result'); } } catch (error) { @@ -1509,10 +878,19 @@ function _listNext(nextPageLink, options, callback) { } /** - * Gets a list of File Servers within the specified resource group. + * Deletes a File Server. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1525,14 +903,13 @@ function _listNext(nextPageLink, options, callback) { * * {Error} err - The Error object if an error occurred, null otherwise. * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * {null} [result] - The deserialized result object if an error did not occur. * * {object} [request] - The HTTP Request object if an error did not occur. * * {stream} [response] - The HTTP Response stream if an error did not occur. 
*/ -function _listByResourceGroupNext(nextPageLink, options, callback) { +function _beginDeleteMethod(resourceGroupName, workspaceName, fileServerName, options, callback) { /* jshint validthis: true */ let client = this.client; if(!callback && typeof options === 'function') { @@ -1544,8 +921,54 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { } // Validate try { - if (nextPageLink === null || nextPageLink === undefined || typeof nextPageLink.valueOf() !== 'string') { - throw new Error('nextPageLink cannot be null or undefined and it must be of type string.'); + if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { + throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); + } + if (resourceGroupName !== null && resourceGroupName !== undefined) { + if (resourceGroupName.match(/^[-\w\._]+$/) === null) + { + throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); + } + } + if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { + throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); + } + if (workspaceName !== null && workspaceName !== undefined) { + if (workspaceName.length > 64) + { + throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); + } + if (workspaceName.length < 1) + { + throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); + } + if (workspaceName.match(/^[-\w_]+$/) === null) + { + throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (fileServerName === null || fileServerName === undefined || typeof fileServerName.valueOf() !== 'string') { + throw new Error('fileServerName cannot be null or undefined and it must be of type string.'); + } + if (fileServerName !== null && fileServerName !== 
undefined) { + if (fileServerName.length > 64) + { + throw new Error('"fileServerName" should satisfy the constraint - "MaxLength": 64'); + } + if (fileServerName.length < 1) + { + throw new Error('"fileServerName" should satisfy the constraint - "MinLength": 1'); + } + if (fileServerName.match(/^[-\w_]+$/) === null) + { + throw new Error('"fileServerName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { + throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); + } + if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { + throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); } if (this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { throw new Error('this.client.acceptLanguage must be of type string.'); @@ -1555,12 +978,21 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { } // Construct URL - let requestUrl = '{nextLink}'; - requestUrl = requestUrl.replace('{nextLink}', nextPageLink); + let baseUrl = this.client.baseUri; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? 
'' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}/fileServers/{fileServerName}'; + requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); + requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{fileServerName}', encodeURIComponent(fileServerName)); + requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); + let queryParameters = []; + queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (queryParameters.length > 0) { + requestUrl += '?' + queryParameters.join('&'); + } // Create HTTP transport objects let httpRequest = new WebResource(); - httpRequest.method = 'GET'; + httpRequest.method = 'DELETE'; httpRequest.url = requestUrl; httpRequest.headers = {}; // Set Headers @@ -1585,7 +1017,7 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { return callback(err); } let statusCode = response.statusCode; - if (statusCode !== 200) { + if (statusCode !== 200 && statusCode !== 202 && statusCode !== 204) { let error = new Error(responseBody); error.statusCode = response.statusCode; error.request = msRest.stripRequest(httpRequest); @@ -1613,23 +1045,6 @@ function _listByResourceGroupNext(nextPageLink, options, callback) { // Create Result let result = null; if (responseBody === '') responseBody = null; - // Deserialize Response - if (statusCode === 200) { - let parsedResponse = null; - try { - parsedResponse = JSON.parse(responseBody); - result = JSON.parse(responseBody); - if (parsedResponse !== null && parsedResponse !== undefined) { - let resultMapper = new client.models['FileServerListResult']().mapper(); - result = client.deserialize(resultMapper, parsedResponse, 'result'); - } - } catch (error) { - let deserializationError = new Error(`Error ${error} occurred in deserializing the 
responseBody - ${responseBody}`); - deserializationError.request = msRest.stripRequest(httpRequest); - deserializationError.response = msRest.stripResponse(response); - return callback(deserializationError); - } - } return callback(null, result, httpRequest, response); }); @@ -1758,219 +1173,25 @@ function _listByWorkspaceNext(nextPageLink, options, callback) { } } - return callback(null, result, httpRequest, response); - }); -} - -/** Class representing a FileServers. */ -class FileServers { - /** - * Create a FileServers. - * @param {BatchAIManagementClient} client Reference to the service client. - */ - constructor(client) { - this.client = client; - this._list = _list; - this._listByResourceGroup = _listByResourceGroup; - this._create = _create; - this._deleteMethod = _deleteMethod; - this._get = _get; - this._listByWorkspace = _listByWorkspace; - this._beginCreate = _beginCreate; - this._beginDeleteMethod = _beginDeleteMethod; - this._listNext = _listNext; - this._listByResourceGroupNext = _listByResourceGroupNext; - this._listByWorkspaceNext = _listByWorkspaceNext; - } - - /** - * Gets a list of File Servers associated with the given subscription. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.fileServersListOptions] Additional parameters for - * the operation - * - * @param {number} [options.fileServersListOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. 
- */ - listWithHttpOperationResponse(options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._list(options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of File Servers associated with the given subscription. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.fileServersListOptions] Additional parameters for - * the operation - * - * @param {number} [options.fileServersListOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {FileServerListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ - list(options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._list(options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._list(options, optionalCallback); - } - } - - /** - * Gets a list of File Servers within the specified resource group. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.fileServersListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.fileServersListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listByResourceGroupWithHttpOperationResponse(resourceGroupName, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listByResourceGroup(resourceGroupName, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } + return callback(null, result, httpRequest, response); + }); +} +/** Class representing a FileServers. */ +class FileServers { /** - * Gets a list of File Servers within the specified resource group. 
- * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.fileServersListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.fileServersListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {FileServerListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. + * Create a FileServers. + * @param {BatchAIManagementClient} client Reference to the service client. 
*/ - listByResourceGroup(resourceGroupName, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listByResourceGroup(resourceGroupName, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listByResourceGroup(resourceGroupName, options, optionalCallback); - } + constructor(client) { + this.client = client; + this._create = _create; + this._deleteMethod = _deleteMethod; + this._get = _get; + this._listByWorkspace = _listByWorkspace; + this._beginCreate = _beginCreate; + this._beginDeleteMethod = _beginDeleteMethod; + this._listByWorkspaceNext = _listByWorkspaceNext; } /** @@ -1991,61 +1212,58 @@ class FileServers { * @param {object} parameters The parameters to provide for File Server * creation. * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. - * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. 
* - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. * * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. * * @param {string} * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {object} parameters.dataDisks Data disks. 
Settings for the data disks + * which will be created for the File Server. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet identifier. 
Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. * * @param {string} parameters.subnet.id The ID of the resource * @@ -2092,61 +1310,58 @@ class FileServers { * @param {object} parameters The parameters to provide for File Server * creation. * - * @param {string} parameters.location The region in which to create the File - * Server. + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. 
Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. * * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. * * @param {string} * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. 
Possible values include: 'none', 'readonly', 'readwrite' * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. * * @param {string} parameters.subnet.id The ID of the resource * @@ -2524,61 +1739,58 @@ class FileServers { * @param {object} parameters The parameters to provide for File Server * creation. * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. - * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {string} parameters.vmSize VM size. 
The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. * * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. * * @param {string} * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). 
This does not get returned in a GET response - * body. + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. 
Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. * * @param {string} parameters.subnet.id The ID of the resource * @@ -2625,61 +1837,58 @@ class FileServers { * @param {object} parameters The parameters to provide for File Server * creation. * - * @param {string} parameters.location The region in which to create the File - * Server. + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. 
Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. * * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. * * @param {string} * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * public key. SSH public key of the administrator user account. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. 
Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. * * @param {string} parameters.subnet.id The ID of the resource * @@ -2831,174 +2040,6 @@ class FileServers { } } - /** - * Gets a list of File Servers associated with the given subscription. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. 
- * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listNextWithHttpOperationResponse(nextPageLink, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listNext(nextPageLink, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of File Servers associated with the given subscription. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. - * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {FileServerListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. 
- */ - listNext(nextPageLink, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listNext(nextPageLink, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listNext(nextPageLink, options, optionalCallback); - } - } - - /** - * Gets a list of File Servers within the specified resource group. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error} - The error object. - */ - listByResourceGroupNextWithHttpOperationResponse(nextPageLink, options) { - let client = this.client; - let self = this; - return new Promise((resolve, reject) => { - self._listByResourceGroupNext(nextPageLink, options, (err, result, request, response) => { - let httpOperationResponse = new msRest.HttpOperationResponse(request, response); - httpOperationResponse.body = result; - if (err) { reject(err); } - else { resolve(httpOperationResponse); } - return; - }); - }); - } - - /** - * Gets a list of File Servers within the specified resource group. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {function} [optionalCallback] - The optional callback. 
- * - * @returns {function|Promise} If a callback was passed as the last parameter - * then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned - * - * @resolve {FileServerListResult} - The deserialized result object. - * - * @reject {Error} - The error object. - * - * {function} optionalCallback(err, result, request, response) - * - * {Error} err - The Error object if an error occurred, null otherwise. - * - * {object} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. - * - * {object} [request] - The HTTP Request object if an error did not occur. - * - * {stream} [response] - The HTTP Response stream if an error did not occur. - */ - listByResourceGroupNext(nextPageLink, options, optionalCallback) { - let client = this.client; - let self = this; - if (!optionalCallback && typeof options === 'function') { - optionalCallback = options; - options = null; - } - if (!optionalCallback) { - return new Promise((resolve, reject) => { - self._listByResourceGroupNext(nextPageLink, options, (err, result, request, response) => { - if (err) { reject(err); } - else { resolve(result); } - return; - }); - }); - } else { - return self._listByResourceGroupNext(nextPageLink, options, optionalCallback); - } - } - /** * Gets a list of File Servers associated with the specified workspace. * diff --git a/lib/services/batchaiManagement/lib/operations/index.d.ts b/lib/services/batchaiManagement/lib/operations/index.d.ts index 8474bffd5d..a5e2c7da8e 100644 --- a/lib/services/batchaiManagement/lib/operations/index.d.ts +++ b/lib/services/batchaiManagement/lib/operations/index.d.ts @@ -256,45 +256,47 @@ export interface Usages { /** * @class - * Clusters + * Workspaces * __NOTE__: An instance of this class is automatically created for an * instance of the BatchAIManagementClient. 
*/ -export interface Clusters { +export interface Workspaces { /** - * Gets a list of Clusters associated with the given subscription. + * Gets a list of Workspaces associated with the given subscription. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.clustersListOptions] Additional parameters for the - * operation + * @param {object} [options.workspacesListOptions] Additional parameters for + * the operation * - * @param {number} [options.clustersListOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. + * @param {number} [options.workspacesListOptions.maxResults] The maximum + * number of items to return in the response. A maximum of 1000 files can be + * returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listWithHttpOperationResponse(options?: { clustersListOptions? : models.ClustersListOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listWithHttpOperationResponse(options?: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Clusters associated with the given subscription. + * Gets a list of Workspaces associated with the given subscription. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.clustersListOptions] Additional parameters for the - * operation + * @param {object} [options.workspacesListOptions] Additional parameters for + * the operation * - * @param {number} [options.clustersListOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. 
+ * @param {number} [options.workspacesListOptions.maxResults] The maximum + * number of items to return in the response. A maximum of 1000 files can be + * returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -306,7 +308,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {WorkspaceListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -314,58 +316,58 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. + * See {@link WorkspaceListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - list(options?: { clustersListOptions? : models.ClustersListOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - list(callback: ServiceCallback): void; - list(options: { clustersListOptions? : models.ClustersListOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + list(options?: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }): Promise; + list(callback: ServiceCallback): void; + list(options: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Clusters within the specified resource group. + * Gets a list of Workspaces within the specified resource group. 
* * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.clustersListByResourceGroupOptions] Additional + * @param {object} [options.workspacesListByResourceGroupOptions] Additional * parameters for the operation * - * @param {number} [options.clustersListByResourceGroupOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. + * @param {number} [options.workspacesListByResourceGroupOptions.maxResults] + * The maximum number of items to return in the response. A maximum of 1000 + * files can be returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByResourceGroupWithHttpOperationResponse(resourceGroupName: string, options?: { clustersListByResourceGroupOptions? : models.ClustersListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listByResourceGroupWithHttpOperationResponse(resourceGroupName: string, options?: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Clusters within the specified resource group. + * Gets a list of Workspaces within the specified resource group. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. * * @param {object} [options] Optional Parameters. 
* - * @param {object} [options.clustersListByResourceGroupOptions] Additional + * @param {object} [options.workspacesListByResourceGroupOptions] Additional * parameters for the operation * - * @param {number} [options.clustersListByResourceGroupOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. + * @param {number} [options.workspacesListByResourceGroupOptions.maxResults] + * The maximum number of items to return in the response. A maximum of 1000 + * files can be returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -377,7 +379,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {WorkspaceListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -385,20 +387,20 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. + * See {@link WorkspaceListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByResourceGroup(resourceGroupName: string, options?: { clustersListByResourceGroupOptions? : models.ClustersListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroup(resourceGroupName: string, callback: ServiceCallback): void; - listByResourceGroup(resourceGroupName: string, options: { clustersListByResourceGroupOptions? : models.ClustersListByResourceGroupOptions, customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByResourceGroup(resourceGroupName: string, options?: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise; + listByResourceGroup(resourceGroupName: string, callback: ServiceCallback): void; + listByResourceGroup(resourceGroupName: string, options: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates a Cluster in the given Workspace. + * Creates a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -407,184 +409,157 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {object} parameters Workspace creation parameters. * - * @param {object} parameters The parameters to provide for the Cluster - * creation. + * @param {string} parameters.location Location. The region in which to create + * the Workspace. * - * @param {string} parameters.location The region in which to create the - * cluster. + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. + * @param {object} [options] Optional Parameters. * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. 
For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' + * @returns {Promise} A promise is returned * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @resolve {HttpOperationResponse} - The deserialized result object. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @reject {Error|ServiceError} - The error object. + */ + createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + + /** + * Creates a Workspace. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). 
The name must be from 1 through 64 characters long. * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * @param {object} parameters Workspace creation parameters. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {string} parameters.location Location. The region in which to create + * the Workspace. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {object} [options] Optional Parameters. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * @param {ServiceCallback} [optionalCallback] - The optional callback. * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * @returns {ServiceCallback|Promise} If a callback was passed as the last + * parameter then it returns the callback else returns a Promise. * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * {Promise} A promise is returned. * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * @resolve {Workspace} - The deserialized result object. 
* - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * @reject {Error|ServiceError} - The error object. * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. + * {ServiceCallback} optionalCallback(err, result, request, response) * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. + * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. + * {Workspace} [result] - The deserialized result object if an error did not occur. + * See {@link Workspace} for more information. * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. 
- * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. + * {WebResource} [request] - The HTTP Request object if an error did not occur. * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. + * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. + */ + create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + + + /** + * Updates properties of a Workspace. * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. 
The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. + * @param {object} [options] Optional Parameters. * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. + * @param {object} [options.tags] Tags. The user specified tags associated with + * the Workspace. * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. + * @returns {Promise} A promise is returned * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. + * @resolve {HttpOperationResponse} - The deserialized result object. * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. + * @reject {Error|ServiceError} - The error object. + */ + updateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { tags? : { [propertyName: string]: string }, customHeaders? : { [headerName: string]: string; } }): Promise>; + + /** + * Updates properties of a Workspace. * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. 
+ * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. + * @param {object} [options] Optional Parameters. * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. + * @param {object} [options.tags] Tags. The user specified tags associated with + * the Workspace. * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * @param {ServiceCallback} [optionalCallback] - The optional callback. * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. 
+ * @returns {ServiceCallback|Promise} If a callback was passed as the last + * parameter then it returns the callback else returns a Promise. * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * {Promise} A promise is returned. * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * @resolve {Workspace} - The deserialized result object. * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * @reject {Error|ServiceError} - The error object. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * {ServiceCallback} optionalCallback(err, result, request, response) * - * @param {string} parameters.subnet.id The ID of the resource + * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * + * {Workspace} [result] - The deserialized result object if an error did not occur. + * See {@link Workspace} for more information. + * + * {WebResource} [request] - The HTTP Request object if an error did not occur. + * + * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. + */ + update(resourceGroupName: string, workspaceName: string, options?: { tags? : { [propertyName: string]: string }, customHeaders? : { [headerName: string]: string; } }): Promise; + update(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + update(resourceGroupName: string, workspaceName: string, options: { tags? : { [propertyName: string]: string }, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + + + /** + * Deletes a Workspace. 
+ * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -593,14 +568,14 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a Cluster in the given Workspace. + * Deletes a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -609,184 +584,69 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} parameters The parameters to provide for the Cluster - * creation. + * @param {object} [options] Optional Parameters. * - * @param {string} parameters.location The region in which to create the - * cluster. 
+ * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. + * @param {ServiceCallback} [optionalCallback] - The optional callback. * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). + * @returns {ServiceCallback|Promise} If a callback was passed as the last + * parameter then it returns the callback else returns a Promise. * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' + * {Promise} A promise is returned. * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. + * @resolve {null} - The deserialized result object. * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings + * @reject {Error|ServiceError} - The error object. * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. + * {ServiceCallback} optionalCallback(err, result, request, response) * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. 
Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' + * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings + * {null} [result] - The deserialized result object if an error did not occur. * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * {WebResource} [request] - The HTTP Request object if an error did not occur. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. + */ + deleteMethod(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + deleteMethod(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + + + /** + * Gets information about a Workspace. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. 
* - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. + * @param {object} [options] Optional Parameters. * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. + * @param {object} [options.customHeaders] Headers that will be added to the + * request * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. + * @returns {Promise} A promise is returned * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. + * @resolve {HttpOperationResponse} - The deserialized result object. * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. + * @reject {Error|ServiceError} - The error object. + */ + getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + + /** + * Gets information about a Workspace. * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. 
- * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. - * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. - * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. - * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. - * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. - * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. - * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. 
References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. - * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. - * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. - * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. 
- * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. - * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -800,7 +660,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {Cluster} - The deserialized result object. + * @resolve {Workspace} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -808,20 +668,20 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Cluster} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * {Workspace} [result] - The deserialized result object if an error did not occur. 
+ * See {@link Workspace} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, callback: ServiceCallback): void; - create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + get(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Updates properties of a Cluster. + * Creates a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -830,42 +690,13 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} parameters Additional parameters for cluster update. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. 
- * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {object} parameters Workspace creation parameters. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {string} parameters.location Location. The region in which to create + * the Workspace. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. * * @param {object} [options] Optional Parameters. * @@ -874,14 +705,14 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
*/ - updateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterUpdateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Updates properties of a Cluster. + * Creates a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -890,42 +721,13 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} parameters Additional parameters for cluster update. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. 
Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. + * @param {object} parameters Workspace creation parameters. * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. + * @param {string} parameters.location Location. The region in which to create + * the Workspace. * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. * * @param {object} [options] Optional Parameters. * @@ -939,7 +741,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {Cluster} - The deserialized result object. + * @resolve {Workspace} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -947,20 +749,20 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Cluster} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * {Workspace} [result] - The deserialized result object if an error did not occur. + * See {@link Workspace} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - update(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterUpdateParameters, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - update(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterUpdateParameters, callback: ServiceCallback): void; - update(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterUpdateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a Cluster. + * Deletes a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -969,11 +771,6 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -985,10 +782,10 @@ export interface Clusters { * * @reject {Error|ServiceError} - The error object. */ - deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise>; + beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a Cluster. + * Deletes a Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -997,11 +794,6 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1028,25 +820,16 @@ export interface Clusters { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; - deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets information about a Cluster. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * Gets a list of Workspaces associated with the given subscription. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * @@ -1055,26 +838,17 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about a Cluster. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. 
+ * Gets a list of Workspaces associated with the given subscription. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * @@ -1088,7 +862,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {Cluster} - The deserialized result object. + * @resolve {WorkspaceListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1096,32 +870,23 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Cluster} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. + * See {@link WorkspaceListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - get(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - get(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; - get(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listNext(nextPageLink: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; + listNext(nextPageLink: string, callback: ServiceCallback): void; + listNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Get the IP address, port of all the compute nodes in the Cluster. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * Gets a list of Workspaces within the specified resource group. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * @@ -1130,26 +895,17 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listRemoteLoginInformationWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listByResourceGroupNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Get the IP address, port of all the compute nodes in the Cluster. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. 
- * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * Gets a list of Workspaces within the specified resource group. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * @@ -1163,7 +919,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {RemoteLoginInformationListResult} - The deserialized result object. + * @resolve {WorkspaceListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1171,21 +927,29 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. - * See {@link RemoteLoginInformationListResult} for more - * information. + * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. + * See {@link WorkspaceListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByResourceGroupNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listByResourceGroupNext(nextPageLink: string, callback: ServiceCallback): void; + listByResourceGroupNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; +} + +/** + * @class + * Experiments + * __NOTE__: An instance of this class is automatically created for an + * instance of the BatchAIManagementClient. + */ +export interface Experiments { /** - * Gets information about Clusters associated with the given Workspace. + * Gets a list of Experiments within the specified Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1196,10 +960,10 @@ export interface Clusters { * * @param {object} [options] Optional Parameters. * - * @param {object} [options.clustersListByWorkspaceOptions] Additional + * @param {object} [options.experimentsListByWorkspaceOptions] Additional * parameters for the operation * - * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The + * @param {number} [options.experimentsListByWorkspaceOptions.maxResults] The * maximum number of items to return in the response. A maximum of 1000 files * can be returned. * @@ -1208,14 +972,14 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
*/ - listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about Clusters associated with the given Workspace. + * Gets a list of Experiments within the specified Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1226,10 +990,10 @@ export interface Clusters { * * @param {object} [options] Optional Parameters. * - * @param {object} [options.clustersListByWorkspaceOptions] Additional + * @param {object} [options.experimentsListByWorkspaceOptions] Additional * parameters for the operation * - * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The + * @param {number} [options.experimentsListByWorkspaceOptions.maxResults] The * maximum number of items to return in the response. A maximum of 1000 files * can be returned. * @@ -1243,7 +1007,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {ExperimentListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1251,20 +1015,20 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {ExperimentListResult} [result] - The deserialized result object if an error did not occur. + * See {@link ExperimentListResult} for more information. 
* * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - listByWorkspace(resourceGroupName: string, workspaceName: string, options: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise; + listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates a Cluster in the given Workspace. + * Creates an Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1273,184 +1037,9 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. 
- * - * @param {object} parameters The parameters to provide for the Cluster - * creation. - * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). - * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' - * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. - * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. 
- * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. - * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. - * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. - * - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. - * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. - * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. 
- * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. - * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. - * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. - * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. - * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. - * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. 
- * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. - * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. - * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. - * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. - * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. 
- * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. - * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. - * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1459,14 +1048,14 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a Cluster in the given Workspace. + * Creates an Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. 
@@ -1475,184 +1064,9 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. - * - * @param {object} parameters The parameters to provide for the Cluster - * creation. - * - * @param {string} parameters.location The region in which to create the - * cluster. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the Cluster. - * - * @param {string} parameters.vmSize The size of the virtual machines in the - * cluster. All virtual machines in a cluster are the same size. For - * information about available VM sizes for clusters using images from the - * Virtual Machines Marketplace (see Sizes for Virtual Machines (Linux) or - * Sizes for Virtual Machines (Windows). Batch AI service supports all Azure VM - * sizes except STANDARD_A0 and those with premium storage (STANDARD_GS, - * STANDARD_DS, and STANDARD_DSV2 series). - * - * @param {string} [parameters.vmPriority] dedicated or lowpriority. Default is - * dedicated. Possible values include: 'dedicated', 'lowpriority' - * - * @param {object} [parameters.scaleSettings] Desired scale for the cluster. - * - * @param {object} [parameters.scaleSettings.manual] The scale for the cluster - * by manual settings - * - * @param {number} parameters.scaleSettings.manual.targetNodeCount The desired - * number of compute nodes in the Cluster. Default is 0. If autoScaleSettings - * are not specified, then the Cluster starts with this target. 
- * - * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] - * Determines what to do with the job(s) running on compute node if the Cluster - * size is decreasing. The default value is requeue. Possible values include: - * 'requeue', 'terminate', 'waitforjobcompletion' - * - * @param {object} [parameters.scaleSettings.autoScale] The scale for the - * cluster by autoscale settings - * - * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount - * Specifies the minimum number of compute nodes the cluster can have. - * - * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount - * Specifies the maximum number of compute nodes the cluster can have. - * - * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] - * Specifies the number of compute nodes to allocate on cluster creation. Note - * that this value is used only during cluster creation. - * - * @param {object} [parameters.virtualMachineConfiguration] Settings for OS - * image and mounted data volumes. - * - * @param {object} [parameters.virtualMachineConfiguration.imageReference] - * Reference to OS image. - * - * @param {string} - * parameters.virtualMachineConfiguration.imageReference.publisher Publisher of - * the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.offer - * Offer of the image. - * - * @param {string} parameters.virtualMachineConfiguration.imageReference.sku - * SKU of the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.version] Version of - * the image. - * - * @param {string} - * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] - * The ARM resource identifier of the virtual machine image. Computes nodes of - * the cluster will be created using this custom image. 
This is of the form - * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName} - * The virtual machine image must be in the same region and subscription as the - * cluster. For information about the firewall settings for the Batch node - * agent to communicate with the Batch service see - * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. - * Note, you need to provide publisher, offer and sku of the base OS image of - * which the custom image has been derived from. - * - * @param {object} [parameters.nodeSetup] Setup to be done on all compute nodes - * in the cluster. - * - * @param {object} [parameters.nodeSetup.setupTask] Specifies a setup task - * which can be used to customize the compute nodes of the cluster. The - * NodeSetup task runs everytime a VM is rebooted. For that reason the task - * code needs to be idempotent. Generally it is used to either download static - * data that is required for all jobs that run on the cluster VMs or to - * download/install software. - * - * @param {string} parameters.nodeSetup.setupTask.commandLine Command line to - * be executed on each cluster's node after it being allocated or rebooted. - * Command line to be executed on each cluster's node after it being allocated - * or rebooted. The command is executed in a bash subshell as a root. - * - * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] - * Collection of environment variables to be set for setup task. - * - * @param {array} [parameters.nodeSetup.setupTask.secrets] Collection of - * environment variables with secret values to be set for setup task. Server - * will never report values of these variables back. - * - * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix The - * prefix of a path where the Batch AI service will upload the stdout and - * stderr of the setup task. 
- * - * @param {object} [parameters.nodeSetup.mountVolumes] Information on shared - * volumes to be used by jobs. Specified mount volumes will be available to all - * jobs executing on the cluster. The volumes will be mounted at location - * specified by $AZ_BATCHAI_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure - * File Share setup configuration. References to Azure File Shares that are to - * be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] - * Azure Blob FileSystem setup configuration. References to Azure Blob FUSE - * that are to be mounted to the cluster nodes. - * - * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] - * References to a list of file servers that are mounted to the cluster node. - * - * @param {object} [parameters.nodeSetup.performanceCountersSettings] Specifies - * settings for performance counters collecting and uploading. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference - * Specifies Azure Application Insights information for performance counters - * reporting. If provided, Batch AI will upload node performance counters to - * the corresponding Azure Application Insights account. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component - * Specifies the Azure Application Insights component resource id. - * - * @param {string} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] - * Value of the Azure Application Insights instrumentation key. 
- * - * @param {object} - * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] - * Specifies a KeyVault Secret containing Azure Application Insights - * instrumentation key. Specifies KeyVault Store and Secret which contains - * Azure Application Insights instrumentation key. One of instumentationKey or - * instrumentationKeySecretReference must be specified. - * - * @param {object} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. - * - * @param {string} - * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} parameters.userAccountSettings Settings for user account - * that will be created on all compute nodes of the cluster. - * - * @param {string} parameters.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} [parameters.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. - * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1666,7 +1080,7 @@ export interface Clusters { * * {Promise} A promise is returned. 
* - * @resolve {Cluster} - The deserialized result object. + * @resolve {Experiment} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1674,20 +1088,20 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Cluster} [result] - The deserialized result object if an error did not occur. - * See {@link Cluster} for more information. + * {Experiment} [result] - The deserialized result object if an error did not occur. + * See {@link Experiment} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, callback: ServiceCallback): void; - beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + create(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a Cluster. + * Deletes an Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. 
@@ -1696,10 +1110,9 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1712,10 +1125,10 @@ export interface Clusters { * * @reject {Error|ServiceError} - The error object. */ - beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a Cluster. + * Deletes an Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -1724,10 +1137,9 @@ export interface Clusters { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} clusterName The name of the cluster within the specified - * resource group. Cluster names can only contain a combination of alphanumeric - * characters along with dash (-) and underscore (_). The name must be from 1 - * through 64 characters long. + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1755,16 +1167,24 @@ export interface Clusters { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Clusters associated with the given subscription. + * Gets information about an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1773,17 +1193,25 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Clusters associated with the given subscription. + * Gets information about an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1797,7 +1225,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {Experiment} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -1805,23 +1233,31 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {Experiment} [result] - The deserialized result object if an error did not occur. + * See {@link Experiment} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listNext(nextPageLink: string, callback: ServiceCallback): void; - listNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + get(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Clusters within the specified resource group. + * Creates an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1830,17 +1266,25 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByResourceGroupNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Clusters within the specified resource group. + * Creates an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1854,7 +1298,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {Experiment} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -1862,23 +1306,31 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {Experiment} [result] - The deserialized result object if an error did not occur. + * See {@link Experiment} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByResourceGroupNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroupNext(nextPageLink: string, callback: ServiceCallback): void; - listByResourceGroupNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Get the IP address, port of all the compute nodes in the Cluster. + * Deletes an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. 
+ * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1887,17 +1339,25 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listRemoteLoginInformationNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Get the IP address, port of all the compute nodes in the Cluster. + * Deletes an Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -1911,7 +1371,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {RemoteLoginInformationListResult} - The deserialized result object. 
+ * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1919,21 +1379,19 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. - * See {@link RemoteLoginInformationListResult} for more - * information. + * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listRemoteLoginInformationNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listRemoteLoginInformationNext(nextPageLink: string, callback: ServiceCallback): void; - listRemoteLoginInformationNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets information about Clusters associated with the given Workspace. + * Gets a list of Experiments within the specified Workspace. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -1945,14 +1403,14 @@ export interface Clusters { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. 
+ * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about Clusters associated with the given Workspace. + * Gets a list of Experiments within the specified Workspace. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -1969,7 +1427,7 @@ export interface Clusters { * * {Promise} A promise is returned. * - * @resolve {ClusterListResult} - The deserialized result object. + * @resolve {ExperimentListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -1977,36 +1435,47 @@ export interface Clusters { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ClusterListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ClusterListResult} for more information. + * {ExperimentListResult} [result] - The deserialized result object if an error did not occur. + * See {@link ExperimentListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; - listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; + listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; } /** * @class - * FileServers + * Jobs * __NOTE__: An instance of this class is automatically created for an * instance of the BatchAIManagementClient. */ -export interface FileServers { +export interface Jobs { /** - * Gets a list of File Servers associated with the given subscription. + * Gets a list of Jobs within the specified Experiment. + * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.fileServersListOptions] Additional parameters for - * the operation + * @param {object} [options.jobsListByExperimentOptions] Additional parameters + * for the operation * - * @param {number} [options.fileServersListOptions.maxResults] The maximum + * @param {number} [options.jobsListByExperimentOptions.maxResults] The maximum * number of items to return in the response. A maximum of 1000 files can be * returned. * @@ -2015,21 +1484,32 @@ export interface FileServers { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
*/ - listWithHttpOperationResponse(options?: { fileServersListOptions? : models.FileServersListOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listByExperimentWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of File Servers associated with the given subscription. + * Gets a list of Jobs within the specified Experiment. + * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.fileServersListOptions] Additional parameters for - * the operation + * @param {object} [options.jobsListByExperimentOptions] Additional parameters + * for the operation * - * @param {number} [options.fileServersListOptions.maxResults] The maximum + * @param {number} [options.jobsListByExperimentOptions.maxResults] The maximum * number of items to return in the response. A maximum of 1000 files can be * returned. * @@ -2043,7 +1523,7 @@ export interface FileServers { * * {Promise} A promise is returned. * - * @resolve {FileServerListResult} - The deserialized result object. + * @resolve {JobListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -2051,164 +1531,322 @@ export interface FileServers { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * {JobListResult} [result] - The deserialized result object if an error did not occur. + * See {@link JobListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - list(options?: { fileServersListOptions? : models.FileServersListOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - list(callback: ServiceCallback): void; - list(options: { fileServersListOptions? : models.FileServersListOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? : { [headerName: string]: string; } }): Promise; + listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; + listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, options: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of File Servers within the specified resource group. + * Creates a Job in the given Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. * - * @param {object} [options] Optional Parameters. + * @param {string} workspaceName The name of the workspace. 
Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [options.fileServersListByResourceGroupOptions] Additional - * parameters for the operation + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {number} [options.fileServersListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} parameters The parameters to provide for job creation. * - * @returns {Promise} A promise is returned + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @reject {Error|ServiceError} - The error object. - */ - listByResourceGroupWithHttpOperationResponse(resourceGroupName: string, options?: { fileServersListByResourceGroupOptions? : models.FileServersListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets a list of File Servers within the specified resource group. 
+ * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmouted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * - * @param {object} [options] Optional Parameters. + * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {object} [options.fileServersListByResourceGroupOptions] Additional - * parameters for the operation + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {number} [options.fileServersListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. 
* - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to + * download the container from. * - * {Promise} A promise is returned. + * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] + * Server URL. URL for image repository. * - * @resolve {FileServerListResult} - The deserialized result object. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * - * @reject {Error|ServiceError} - The error object. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.username User + * name. User name to login to the repository. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {string} + * [parameters.containerSettings.imageSourceRegistry.credentials.password] + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. 
* - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {object} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listByResourceGroup(resourceGroupName: string, options?: { fileServersListByResourceGroupOptions? : models.FileServersListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroup(resourceGroupName: string, callback: ServiceCallback): void; - listByResourceGroup(resourceGroupName: string, options: { fileServersListByResourceGroupOptions? : models.FileServersListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Creates a File Server in the given workspace. + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id + * The ID of the resource * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. 
File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK + * (aka Microsoft Cognitive Toolkit) job. * - * @param {object} parameters The parameters to provide for File Server - * creation. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} parameters.location The region in which to create the File - * Server. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. + * @param {string} [parameters.cntkSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. 
* - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for + * pyTorch job. * - * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. 
+ * + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. + * + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. + * + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. 
+ * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if + * pythonScriptFilePath is specified. + * + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be + * specified if configFilePath is specified. + * + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. The property can be + * specified only if the pythonScriptFilePath is specified. + * + * @param {string} [parameters.caffeSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the Caffe job. + * + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for + * Caffe2 job. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. 
Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for + * Chainer job. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} [parameters.chainerSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. + * + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. + * + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. + * + * @param {string} parameters.customMpiSettings.commandLine Command line. 
The + * command line to be executed by mpi runtime on each compute node. + * + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for + * Horovod job. + * + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.horovodSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. + * + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. + * + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. + * + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. + * + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. + * + * @param {array} [parameters.environmentVariables] Environment variables. 
A + * list of user defined environment variables which will be setup for the job. + * + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. + * + * @param {object} [parameters.constraints] Constraints associated with the + * Job. + * + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -2217,14 +1855,14 @@ export interface FileServers { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a File Server in the given workspace. + * Creates a Job in the given Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2233,71 +1871,300 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). 
The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} parameters The parameters to provide for File Server - * creation. + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. + * + * @param {object} parameters The parameters to provide for job creation. + * + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' + * + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. + * + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmouted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. + * + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. + * + * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {string} parameters.location The region in which to create the File - * Server. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. 
* - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to + * download the container from. + * + * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] + * Server URL. URL for image repository. + * + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. 
* * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. + * parameters.containerSettings.imageSourceRegistry.credentials.username User + * name. User name to login to the repository. * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. + * [parameters.containerSettings.imageSourceRegistry.credentials.password] + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. + * + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. + * + * @param {object} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. + * + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id + * The ID of the resource * * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. 
Please refer to docker documentation for supported argument + * formats. * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK + * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. 
* - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} [parameters.cntkSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. + * + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property + * + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for + * pyTorch job. + * + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. + * + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. + * + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. 
Command line arguments that need to be passed to the + * python script for the master task. + * + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. + * + * @param {string} + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if + * pythonScriptFilePath is specified. + * + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be + * specified if configFilePath is specified. + * + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
The property can be + * specified only if the pythonScriptFilePath is specified. + * + * @param {string} [parameters.caffeSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the Caffe job. + * + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for + * Caffe2 job. + * + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. + * + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for + * Chainer job. + * + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.chainerSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. + * + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. 
+ * + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. + * + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. + * + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for + * Horovod job. + * + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. + * + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. + * + * @param {string} [parameters.horovodSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. + * + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property + * + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. + * + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. + * + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. + * + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. 
+ * + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. + * + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. + * + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. + * + * @param {object} [parameters.constraints] Constraints associated with the + * Job. + * + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -2311,7 +2178,7 @@ export interface FileServers { * * {Promise} A promise is returned. * - * @resolve {FileServer} - The deserialized result object. + * @resolve {Job} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -2319,20 +2186,20 @@ export interface FileServers { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileServer} [result] - The deserialized result object if an error did not occur. - * See {@link FileServer} for more information. + * {Job} [result] - The deserialized result object if an error did not occur. + * See {@link Job} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, callback: ServiceCallback): void; - create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a File Server. + * Deletes a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2341,10 +2208,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. 
+ * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2357,10 +2228,10 @@ export interface FileServers { * * @reject {Error|ServiceError} - The error object. */ - deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a File Server. + * Deletes a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2369,10 +2240,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. 
* * @param {object} [options] Optional Parameters. * @@ -2400,13 +2275,13 @@ export interface FileServers { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; - deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets information about a File Server. + * Gets information about a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2415,10 +2290,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2427,14 +2306,14 @@ export interface FileServers { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about a File Server. + * Gets information about a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2443,10 +2322,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2460,7 +2343,7 @@ export interface FileServers { * * {Promise} A promise is returned. * - * @resolve {FileServer} - The deserialized result object. + * @resolve {Job} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -2468,20 +2351,22 @@ export interface FileServers { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileServer} [result] - The deserialized result object if an error did not occur. - * See {@link FileServer} for more information. + * {Job} [result] - The deserialized result object if an error did not occur. + * See {@link Job} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - get(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - get(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; - get(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; + get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of File Servers associated with the specified workspace. + * List all directories and files inside the given directory of the Job's + * output directory (if the output directory is on Azure File Share or Azure + * Storage Container). * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2490,28 +2375,48 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [options] Optional Parameters. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [options.fileServersListByWorkspaceOptions] Additional - * parameters for the operation + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * - * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. + * @param {object} jobsListOutputFilesOptions Additional parameters for the + * operation + * + * @param {string} jobsListOutputFilesOptions.outputdirectoryid Id of the job + * output directory. 
This is the OutputDirectory-->id parameter that is given + * by the user during Create Job. + * + * @param {string} [jobsListOutputFilesOptions.directory] The path to the + * directory. + * + * @param {number} [jobsListOutputFilesOptions.linkexpiryinminutes] The number + * of minutes after which the download link will expire. + * + * @param {number} [jobsListOutputFilesOptions.maxResults] The maximum number + * of items to return in the response. A maximum of 1000 files can be returned. + * + * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listOutputFilesWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of File Servers associated with the specified workspace. + * List all directories and files inside the given directory of the Job's + * output directory (if the output directory is on Azure File Share or Azure + * Storage Container). * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2520,14 +2425,32 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. 
* - * @param {object} [options] Optional Parameters. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [options.fileServersListByWorkspaceOptions] Additional - * parameters for the operation + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * - * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. + * @param {object} jobsListOutputFilesOptions Additional parameters for the + * operation + * + * @param {string} jobsListOutputFilesOptions.outputdirectoryid Id of the job + * output directory. This is the OutputDirectory-->id parameter that is given + * by the user during Create Job. + * + * @param {string} [jobsListOutputFilesOptions.directory] The path to the + * directory. + * + * @param {number} [jobsListOutputFilesOptions.linkexpiryinminutes] The number + * of minutes after which the download link will expire. + * + * @param {number} [jobsListOutputFilesOptions.maxResults] The maximum number + * of items to return in the response. A maximum of 1000 files can be returned. + * + * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -2539,7 +2462,7 @@ export interface FileServers { * * {Promise} A promise is returned. * - * @resolve {FileServerListResult} - The deserialized result object. + * @resolve {FileListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -2547,20 +2470,22 @@ export interface FileServers { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * {FileListResult} [result] - The deserialized result object if an error did not occur. + * See {@link FileListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - listByWorkspace(resourceGroupName: string, workspaceName: string, options: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, callback: ServiceCallback): void; + listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates a File Server in the given workspace. 
+ * Gets a list of currently existing nodes which were used for the Job + * execution. The returned information contains the node ID, its public IP and + * SSH port. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2569,71 +2494,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * - * @param {object} parameters The parameters to provide for File Server - * creation. - * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. - * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. - * - * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. 
- * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. - * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. - * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' - * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. - * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. 
* * @param {object} [options] Optional Parameters. * @@ -2642,14 +2510,16 @@ export interface FileServers { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listRemoteLoginInformationWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a File Server in the given workspace. + * Gets a list of currently existing nodes which were used for the Job + * execution. The returned information contains the node ID, its public IP and + * SSH port. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2658,71 +2528,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. - * - * @param {object} parameters The parameters to provide for File Server - * creation. - * - * @param {string} parameters.location The region in which to create the File - * Server. - * - * @param {object} [parameters.tags] The user specified tags associated with - * the File Server. 
- * - * @param {string} parameters.vmSize The size of the virtual machine of the - * file server. For information about available VM sizes for fileservers from - * the Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). - * - * @param {object} parameters.sshConfiguration SSH configuration for the file - * server. - * - * @param {array} [parameters.sshConfiguration.publicIPsToAllow] List of source - * IP ranges to allow SSH connection to a node. Default value is '*' can be - * used to match all source IPs. Maximum number of IP ranges that can be - * specified are 400. - * - * @param {object} parameters.sshConfiguration.userAccountSettings Settings for - * user account to be created on a node. - * - * @param {string} - * parameters.sshConfiguration.userAccountSettings.adminUserName Specifies the - * name of the administrator account. - * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH - * public keys used to authenticate with linux based VMs. This does not get - * returned in a GET response body. - * - * @param {string} - * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] Admin - * user Password (linux only). This does not get returned in a GET response - * body. - * - * @param {object} parameters.dataDisks Settings for the data disk which would - * be created for the file server. - * - * @param {number} parameters.dataDisks.diskSizeInGB Initial disk size in GB - * for blank data disks, and the new desired size for resizing existing data - * disks. - * - * @param {string} [parameters.dataDisks.cachingType] None, ReadOnly, - * ReadWrite. Default value is None. This property is not patchable. Possible - * values include: 'none', 'readonly', 'readwrite' - * - * @param {number} parameters.dataDisks.diskCount Number of data disks to be - * attached to the VM. RAID level 0 will be applied in the case of multiple - * disks. 
- * - * @param {string} parameters.dataDisks.storageAccountType Specifies the type - * of storage account to be used on the disk. Possible values are: Standard_LRS - * or Premium_LRS. Possible values include: 'Standard_LRS', 'Premium_LRS' - * - * @param {object} [parameters.subnet] Specifies the identifier of the subnet. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} parameters.subnet.id The ID of the resource + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2736,7 +2549,7 @@ export interface FileServers { * * {Promise} A promise is returned. * - * @resolve {FileServer} - The deserialized result object. + * @resolve {RemoteLoginInformationListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -2744,20 +2557,21 @@ export interface FileServers { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileServer} [result] - The deserialized result object if an error did not occur. - * See {@link FileServer} for more information. + * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. + * See {@link RemoteLoginInformationListResult} for more + * information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. 
*/ - beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, callback: ServiceCallback): void; - beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a File Server. + * Terminates a job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2766,10 +2580,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. 
Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2782,10 +2600,10 @@ export interface FileServers { * * @reject {Error|ServiceError} - The error object. */ - beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + terminateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a File Server. + * Terminates a job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -2794,10 +2612,14 @@ export interface FileServers { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} fileServerName The name of the file server within the - * specified resource group. File server names can only contain a combination - * of alphanumeric characters along with dash (-) and underscore (_). The name - * must be from 1 through 64 characters long. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. 
Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -2825,504 +2647,633 @@ export interface FileServers { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of File Servers associated with the given subscription. + * Creates a Job in the given Experiment. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. * - * @param {object} [options] Optional Parameters. + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. 
 *
- * @param {object} [options.customHeaders] Headers that will be added to the
- * request
+ * @param {string} experimentName The name of the experiment. Experiment names
+ * can only contain a combination of alphanumeric characters along with dash
+ * (-) and underscore (_). The name must be from 1 through 64 characters long.
 *
- * @returns {Promise} A promise is returned
+ * @param {string} jobName The name of the job within the specified resource
+ * group. Job names can only contain a combination of alphanumeric characters
+ * along with dash (-) and underscore (_). The name must be from 1 through 64
+ * characters long.
 *
- * @resolve {HttpOperationResponse} - The deserialized result object.
+ * @param {object} parameters The parameters to provide for job creation.
 *
- * @reject {Error|ServiceError} - The error object.
- */
- listNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>;
-
- /**
- * Gets a list of File Servers associated with the given subscription.
+ * @param {string} [parameters.schedulingPriority] Scheduling priority.
+ * Scheduling priority associated with the job. Possible values: low, normal,
+ * high. Possible values include: 'low', 'normal', 'high'
 *
- * @param {string} nextPageLink The NextLink from the previous successful call
- * to List operation.
+ * @param {object} parameters.cluster Cluster. Resource ID of the cluster on
+ * which this job will run.
 *
- * @param {object} [options] Optional Parameters.
+ * @param {object} [parameters.mountVolumes] Mount volumes. Information on
+ * mount volumes to be used by the job. These volumes will be mounted before
+ * the job execution and will be unmounted after the job completion. The volumes
+ * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT
+ * environment variable.
* - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * {Promise} A promise is returned. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @resolve {FileServerListResult} - The deserialized result object. + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @reject {Error|ServiceError} - The error object. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to + * download the container from. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. - * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. 
- * See {@link FileServerListResult} for more information. + * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] + * Server URL. URL for image repository. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listNext(nextPageLink: string, callback: ServiceCallback): void; - listNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Gets a list of File Servers within the specified resource group. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.username User + * name. User name to login to the repository. * - * @param {object} [options] Optional Parameters. + * @param {string} + * [parameters.containerSettings.imageSourceRegistry.credentials.password] + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. 
One of password or passwordSecretReference must be
+ * specified.
 *
- * @returns {Promise} A promise is returned
+ * @param {object}
+ * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault
+ * Key Vault resource identifier. Fully qualified resource identifier of the
+ * Key Vault.
 *
- * @resolve {HttpOperationResponse} - The deserialized result object.
+ * @param {string}
+ * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id
+ * The ID of the resource
 *
- * @reject {Error|ServiceError} - The error object.
- */
- listByResourceGroupNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>;
-
- /**
- * Gets a list of File Servers within the specified resource group.
+ * @param {string}
+ * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl
+ * Secret URL. The URL referencing a secret in the Key Vault.
 *
- * @param {string} nextPageLink The NextLink from the previous successful call
- * to List operation.
+ * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size
+ * of /dev/shm. Please refer to docker documentation for supported argument
+ * formats.
 *
- * @param {object} [options] Optional Parameters.
+ * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK
+ * (aka Microsoft Cognitive Toolkit) job.
 *
- * @param {object} [options.customHeaders] Headers that will be added to the
- * request
+ * @param {string} [parameters.cntkSettings.languageType] Language type. The
+ * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job.
+ * Valid values are 'BrainScript' or 'Python'.
 *
- * @param {ServiceCallback} [optionalCallback] - The optional callback.
+ * @param {string} [parameters.cntkSettings.configFilePath] Config file path.
+ * Specifies the path of the BrainScript config file.
This property can be + * specified only if the languageType is 'BrainScript'. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * {Promise} A promise is returned. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * - * @resolve {FileServerListResult} - The deserialized result object. + * @param {string} [parameters.cntkSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @reject {Error|ServiceError} - The error object. + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for + * pyTorch job. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. 
+ * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listByResourceGroupNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroupNext(nextPageLink: string, callback: ServiceCallback): void; - listByResourceGroupNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Gets a list of File Servers associated with the specified workspace. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [options] Optional Parameters. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @returns {Promise} A promise is returned + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @resolve {HttpOperationResponse} - The deserialized result object. 
+ * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @reject {Error|ServiceError} - The error object. - */ - listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets a list of File Servers associated with the specified workspace. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. 
+ * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if + * pythonScriptFilePath is specified. * - * @param {object} [options] Optional Parameters. + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be + * specified if configFilePath is specified. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. The property can be + * specified only if the pythonScriptFilePath is specified. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {string} [parameters.caffeSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * {Promise} A promise is returned. + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for + * Caffe2 job. * - * @resolve {FileServerListResult} - The deserialized result object. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @reject {Error|ServiceError} - The error object. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
* - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for + * Chainer job. * - * {FileServerListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileServerListResult} for more information. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; - listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; -} - -/** - * @class - * Workspaces - * __NOTE__: An instance of this class is automatically created for an - * instance of the BatchAIManagementClient. - */ -export interface Workspaces { - - - /** - * Gets a list of Workspaces associated with the given subscription. + * @param {string} [parameters.chainerSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [options] Optional Parameters. + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. 
The default value for + * this property is equal to nodeCount property * - * @param {object} [options.workspacesListOptions] Additional parameters for - * the operation + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {number} [options.workspacesListOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @returns {Promise} A promise is returned + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @reject {Error|ServiceError} - The error object. - */ - listWithHttpOperationResponse(options?: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets a list of Workspaces associated with the given subscription. + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for + * Horovod job. * - * @param {object} [options] Optional Parameters. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. 
* - * @param {object} [options.workspacesListOptions] Additional parameters for - * the operation + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {number} [options.workspacesListOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. + * @param {string} [parameters.horovodSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * {Promise} A promise is returned. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @resolve {WorkspaceListResult} - The deserialized result object. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @reject {Error|ServiceError} - The error object. 
+ * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. * - * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. - * See {@link WorkspaceListResult} for more information. - * - * {WebResource} [request] - The HTTP Request object if an error did not occur. - * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - list(options?: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - list(callback: ServiceCallback): void; - list(options: { workspacesListOptions? : models.WorkspacesListOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Gets a list of Workspaces within the specified resource group. + * @param {object} [parameters.constraints] Constraints associated with the + * Job. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. 
* - * @param {object} [options.workspacesListByResourceGroupOptions] Additional - * parameters for the operation - * - * @param {number} [options.workspacesListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. - * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByResourceGroupWithHttpOperationResponse(resourceGroupName: string, options?: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Workspaces within the specified resource group. + * Creates a Job in the given Experiment. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. * - * @param {object} [options] Optional Parameters. + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [options.workspacesListByResourceGroupOptions] Additional - * parameters for the operation + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. 
 * - * @param {number} [options.workspacesListByResourceGroupOptions.maxResults] - * The maximum number of items to return in the response. A maximum of 1000 - * files can be returned. + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} parameters The parameters to provide for job creation. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * {Promise} A promise is returned. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @resolve {WorkspaceListResult} - The deserialized result object. + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * - * @reject {Error|ServiceError} - The error object. + * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. 
* - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. - * See {@link WorkspaceListResult} for more information. + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listByResourceGroup(resourceGroupName: string, options?: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroup(resourceGroupName: string, callback: ServiceCallback): void; - listByResourceGroup(resourceGroupName: string, options: { workspacesListByResourceGroupOptions? : models.WorkspacesListByResourceGroupOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Creates a Workspace. + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to + * download the container from. 
* - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] + * Server URL. URL for image repository. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * - * @param {object} parameters Workspace creation parameters. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * - * @param {string} parameters.location The region in which to create the - * Workspace. + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.username User + * name. User name to login to the repository. * - * @param {object} [parameters.tags] The user specified tags associated with - * the Workspace. + * @param {string} + * [parameters.containerSettings.imageSourceRegistry.credentials.password] + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * - * @param {object} [options] Optional Parameters. + * @param {object} + * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. 
 * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * - * @returns {Promise} A promise is returned + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id + * The ID of the resource * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @param {string} + * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @reject {Error|ServiceError} - The error object. - */ - createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Creates a Workspace. + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK + * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {object} parameters Workspace creation parameters. 
+ * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {string} parameters.location The region in which to create the - * Workspace. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {object} [parameters.tags] The user specified tags associated with - * the Workspace. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * - * @param {object} [options] Optional Parameters. + * @param {string} [parameters.cntkSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for + * pyTorch job. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * {Promise} A promise is returned. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
* - * @resolve {Workspace} - The deserialized result object. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @reject {Error|ServiceError} - The error object. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * {Workspace} [result] - The deserialized result object if an error did not occur. - * See {@link Workspace} for more information. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, callback: ServiceCallback): void; - create(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Deletes a Workspace. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. 
If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if + * pythonScriptFilePath is specified. * - * @param {object} [options] Optional Parameters. + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be + * specified if configFilePath is specified. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. The property can be + * specified only if the pythonScriptFilePath is specified. * - * @returns {Promise} A promise is returned + * @param {string} [parameters.caffeSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @reject {Error|ServiceError} - The error object. - */ - deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Deletes a Workspace. + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for + * Caffe2 job. 
* - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {object} [options] Optional Parameters. + * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for + * Chainer job. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * {Promise} A promise is returned. + * @param {string} [parameters.chainerSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @resolve {null} - The deserialized result object. + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. 
The default value for + * this property is equal to nodeCount property * - * @reject {Error|ServiceError} - The error object. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * {null} [result] - The deserialized result object if an error did not occur. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - deleteMethod(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - deleteMethod(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - deleteMethod(resourceGroupName: string, workspaceName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Gets information about a Workspace. + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for + * Horovod job. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. 
+ * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {object} [options] Optional Parameters. + * @param {string} [parameters.horovodSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @returns {Promise} A promise is returned + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @reject {Error|ServiceError} - The error object. - */ - getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets information about a Workspace. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. 
+ * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. + * + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. + * + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. + * + * @param {object} [parameters.constraints] Constraints associated with the + * Job. + * + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -3336,7 +3287,7 @@ export interface Workspaces { * * {Promise} A promise is returned. * - * @resolve {Workspace} - The deserialized result object. + * @resolve {Job} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3344,20 +3295,20 @@ export interface Workspaces { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Workspace} [result] - The deserialized result object if an error did not occur. - * See {@link Workspace} for more information. + * {Job} [result] - The deserialized result object if an error did not occur. + * See {@link Job} for more information. 
* * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - get(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - get(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - get(resourceGroupName: string, workspaceName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates a Workspace. + * Deletes a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3366,13 +3317,14 @@ export interface Workspaces { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} parameters Workspace creation parameters. - * - * @param {string} parameters.location The region in which to create the - * Workspace. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. 
* - * @param {object} [parameters.tags] The user specified tags associated with - * the Workspace. + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -3381,14 +3333,14 @@ export interface Workspaces { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a Workspace. + * Deletes a Job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3397,13 +3349,14 @@ export interface Workspaces { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} parameters Workspace creation parameters. - * - * @param {string} parameters.location The region in which to create the - * Workspace. + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. * - * @param {object} [parameters.tags] The user specified tags associated with - * the Workspace. 
+ * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. * * @param {object} [options] Optional Parameters. * @@ -3417,7 +3370,7 @@ export interface Workspaces { * * {Promise} A promise is returned. * - * @resolve {Workspace} - The deserialized result object. + * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3425,20 +3378,19 @@ export interface Workspaces { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Workspace} [result] - The deserialized result object if an error did not occur. - * See {@link Workspace} for more information. + * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, callback: ServiceCallback): void; - beginCreate(resourceGroupName: string, workspaceName: string, parameters: models.WorkspaceCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a Workspace. + * Terminates a job. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3447,6 +3399,15 @@ export interface Workspaces { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -3458,10 +3419,10 @@ export interface Workspaces { * * @reject {Error|ServiceError} - The error object. */ - beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginTerminateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a Workspace. + * Terminates a job. 
* * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3470,6 +3431,15 @@ export interface Workspaces { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * + * @param {string} experimentName The name of the experiment. Experiment names + * can only contain a combination of alphanumeric characters along with dash + * (-) and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} jobName The name of the job within the specified resource + * group. Job names can only contain a combination of alphanumeric characters + * along with dash (-) and underscore (_). The name must be from 1 through 64 + * characters long. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -3496,13 +3466,13 @@ export interface Workspaces { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginDeleteMethod(resourceGroupName: string, workspaceName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; + beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Workspaces associated with the given subscription. + * Gets a list of Jobs within the specified Experiment. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -3514,14 +3484,14 @@ export interface Workspaces { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listByExperimentNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Workspaces associated with the given subscription. + * Gets a list of Jobs within the specified Experiment. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -3538,7 +3508,7 @@ export interface Workspaces { * * {Promise} A promise is returned. * - * @resolve {WorkspaceListResult} - The deserialized result object. + * @resolve {JobListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3546,20 +3516,22 @@ export interface Workspaces { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. - * See {@link WorkspaceListResult} for more information. + * {JobListResult} [result] - The deserialized result object if an error did not occur. + * See {@link JobListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. 
* * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listNext(nextPageLink: string, callback: ServiceCallback): void; - listNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByExperimentNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listByExperimentNext(nextPageLink: string, callback: ServiceCallback): void; + listByExperimentNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Workspaces within the specified resource group. + * List all directories and files inside the given directory of the Job's + * output directory (if the output directory is on Azure File Share or Azure + * Storage Container). * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -3571,14 +3543,16 @@ export interface Workspaces { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByResourceGroupNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listOutputFilesNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Workspaces within the specified resource group. + * List all directories and files inside the given directory of the Job's + * output directory (if the output directory is on Azure File Share or Azure + * Storage Container). 
* * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -3595,7 +3569,7 @@ export interface Workspaces { * * {Promise} A promise is returned. * - * @resolve {WorkspaceListResult} - The deserialized result object. + * @resolve {FileListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3603,76 +3577,49 @@ export interface Workspaces { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {WorkspaceListResult} [result] - The deserialized result object if an error did not occur. - * See {@link WorkspaceListResult} for more information. + * {FileListResult} [result] - The deserialized result object if an error did not occur. + * See {@link FileListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByResourceGroupNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByResourceGroupNext(nextPageLink: string, callback: ServiceCallback): void; - listByResourceGroupNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; -} - -/** - * @class - * Experiments - * __NOTE__: An instance of this class is automatically created for an - * instance of the BatchAIManagementClient. - */ -export interface Experiments { + listOutputFilesNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listOutputFilesNext(nextPageLink: string, callback: ServiceCallback): void; + listOutputFilesNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Experiments within the specified Workspace. 
- * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * Gets a list of currently existing nodes which were used for the Job + * execution. The returned information contains the node ID, its public IP and + * SSH port. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.experimentsListByWorkspaceOptions] Additional - * parameters for the operation - * - * @param {number} [options.experimentsListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. - * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; + listRemoteLoginInformationNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Experiments within the specified Workspace. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * Gets a list of currently existing nodes which were used for the Job + * execution. 
The returned information contains the node ID, its public IP and + * SSH port. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.experimentsListByWorkspaceOptions] Additional - * parameters for the operation - * - * @param {number} [options.experimentsListByWorkspaceOptions.maxResults] The - * maximum number of items to return in the response. A maximum of 1000 files - * can be returned. - * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -3683,7 +3630,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {ExperimentListResult} - The deserialized result object. + * @resolve {RemoteLoginInformationListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3691,20 +3638,30 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {ExperimentListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ExperimentListResult} for more information. + * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. + * See {@link RemoteLoginInformationListResult} for more + * information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? 
: { [headerName: string]: string; } }): Promise; - listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; - listByWorkspace(resourceGroupName: string, workspaceName: string, options: { experimentsListByWorkspaceOptions? : models.ExperimentsListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listRemoteLoginInformationNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listRemoteLoginInformationNext(nextPageLink: string, callback: ServiceCallback): void; + listRemoteLoginInformationNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; +} + +/** + * @class + * FileServers + * __NOTE__: An instance of this class is automatically created for an + * instance of the BatchAIManagementClient. + */ +export interface FileServers { /** - * Creates an Experiment. + * Creates a File Server in the given workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3713,9 +3670,68 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. + * + * @param {object} parameters The parameters to provide for File Server + * creation. 
+ * + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. + * + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. + * + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. + * + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. + * + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. + * + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. 
Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -3724,14 +3740,14 @@ export interface Experiments { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates an Experiment. + * Creates a File Server in the given workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3740,9 +3756,68 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. 
Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. + * + * @param {object} parameters The parameters to provide for File Server + * creation. + * + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. + * + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. + * + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. + * + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. + * + * @param {object} parameters.dataDisks Data disks. 
Settings for the data disks + * which will be created for the File Server. + * + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -3756,7 +3831,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {Experiment} - The deserialized result object. + * @resolve {FileServer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3764,20 +3839,20 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Experiment} [result] - The deserialized result object if an error did not occur. - * See {@link Experiment} for more information. 
+ * {FileServer} [result] - The deserialized result object if an error did not occur. + * See {@link FileServer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - create(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - create(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - create(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes an Experiment. + * Deletes a File Server. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3786,9 +3861,10 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. 
+ * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -3801,10 +3877,10 @@ export interface Experiments { * * @reject {Error|ServiceError} - The error object. */ - deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes an Experiment. + * Deletes a File Server. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3813,9 +3889,10 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -3843,13 +3920,13 @@ export interface Experiments { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. 
*/ - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets information about an Experiment. + * Gets information about a File Server. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3858,9 +3935,10 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. 
* @@ -3869,14 +3947,14 @@ export interface Experiments { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about an Experiment. + * Gets information about a File Server. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3885,9 +3963,10 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -3901,7 +3980,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {Experiment} - The deserialized result object. + * @resolve {FileServer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -3909,20 +3988,20 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Experiment} [result] - The deserialized result object if an error did not occur. - * See {@link Experiment} for more information. + * {FileServer} [result] - The deserialized result object if an error did not occur. + * See {@link FileServer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - get(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - get(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - get(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + get(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates an Experiment. + * Gets a list of File Servers associated with the specified workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3931,25 +4010,28 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. 
Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * * @param {object} [options] Optional Parameters. * + * @param {object} [options.fileServersListByWorkspaceOptions] Additional + * parameters for the operation + * + * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 file servers + * can be returned. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates an Experiment. + * Gets a list of File Servers associated with the specified workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -3958,12 +4040,15 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * * @param {object} [options] Optional Parameters. 
 * + * @param {object} [options.fileServersListByWorkspaceOptions] Additional + * parameters for the operation + * + * @param {number} [options.fileServersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 file servers + * can be returned. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -3974,7 +4059,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {Experiment} - The deserialized result object. + * @resolve {FileServerListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -3982,20 +4067,20 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Experiment} [result] - The deserialized result object if an error did not occur. - * See {@link Experiment} for more information. + * {FileServerListResult} [result] - The deserialized result object if an error did not occur. + * See {@link FileServerListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? 
: { [headerName: string]: string; } }): Promise; + listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options: { fileServersListByWorkspaceOptions? : models.FileServersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes an Experiment. + * Creates a File Server in the given workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4004,9 +4089,68 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. + * + * @param {object} parameters The parameters to provide for File Server + * creation. + * + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. + * + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). 
Maximum number of IP ranges that + * can be specified is 400. + * + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. + * + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. + * + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. + * + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. 
Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -4015,14 +4159,14 @@ export interface Experiments { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes an Experiment. + * Creates a File Server in the given workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4031,9 +4175,68 @@ export interface Experiments { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. + * + * @param {object} parameters The parameters to provide for File Server + * creation. 
+ * + * @param {string} parameters.vmSize VM size. The size of the virtual machine + * for the File Server. For information about available VM sizes from the + * Virtual Machines Marketplace, see Sizes for Virtual Machines (Linux). + * + * @param {object} parameters.sshConfiguration SSH configuration. SSH + * configuration for the File Server node. + * + * @param {array} [parameters.sshConfiguration.publicIPsToAllow] Allowed public + * IPs. List of source IP ranges to allow SSH connection from. The default + * value is '*' (all source IPs are allowed). Maximum number of IP ranges that + * can be specified is 400. + * + * @param {object} parameters.sshConfiguration.userAccountSettings User account + * settings. Settings for administrator user account to be created on a node. + * The account can be used to establish SSH connection to the node. + * + * @param {string} + * parameters.sshConfiguration.userAccountSettings.adminUserName User name. + * Name of the administrator user account which can be used to SSH to nodes. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. + * + * @param {string} + * [parameters.sshConfiguration.userAccountSettings.adminUserPassword] + * Password. Password of the administrator user account. + * + * @param {object} parameters.dataDisks Data disks. Settings for the data disks + * which will be created for the File Server. + * + * @param {number} parameters.dataDisks.diskSizeInGB Disk size in GB. Disk size + * in GB for the blank data disks. + * + * @param {string} [parameters.dataDisks.cachingType] Caching type. Caching + * type for the disks. Available values are none (default), readonly, + * readwrite. Caching type can be set only for VM sizes supporting premium + * storage. Possible values include: 'none', 'readonly', 'readwrite' + * + * @param {number} parameters.dataDisks.diskCount Number of data disks. 
Number + * of data disks attached to the File Server. If multiple disks attached, they + * will be configured in RAID level 0. + * + * @param {string} parameters.dataDisks.storageAccountType Storage account + * type. Type of storage account to be used on the disk. Possible values are: + * Standard_LRS or Premium_LRS. Premium storage account type can only be used + * with VM sizes supporting premium storage. Possible values include: + * 'Standard_LRS', 'Premium_LRS' + * + * @param {object} [parameters.subnet] Subnet identifier. Identifier of an + * existing virtual network subnet to put the File Server in. If not provided, + * a new virtual network and subnet will be created. + * + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -4047,7 +4250,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {null} - The deserialized result object. + * @resolve {FileServer} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -4055,22 +4258,32 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {FileServer} [result] - The deserialized result object if an error did not occur. + * See {@link FileServer} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, fileServerName: string, parameters: models.FileServerCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Experiments within the specified Workspace. + * Deletes a File Server. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. 
* @@ -4079,17 +4292,26 @@ export interface Experiments { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of Experiments within the specified Workspace. + * Deletes a File Server. * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {string} fileServerName The name of the file server within the + * specified resource group. File server names can only contain a combination + * of alphanumeric characters along with dash (-) and underscore (_). The name + * must be from 1 through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -4103,7 +4325,7 @@ export interface Experiments { * * {Promise} A promise is returned. * - * @resolve {ExperimentListResult} - The deserialized result object. + * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -4111,83 +4333,43 @@ export interface Experiments { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. 
* - * {ExperimentListResult} [result] - The deserialized result object if an error did not occur. - * See {@link ExperimentListResult} for more information. + * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; - listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; -} - -/** - * @class - * Jobs - * __NOTE__: An instance of this class is automatically created for an - * instance of the BatchAIManagementClient. - */ -export interface Jobs { + beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, fileServerName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of Jobs within the specified Experiment. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * Gets a list of File Servers associated with the specified workspace. * - * @param {string} experimentName The name of the experiment. 
Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * * @param {object} [options] Optional Parameters. * - * @param {object} [options.jobsListByExperimentOptions] Additional parameters - * for the operation - * - * @param {number} [options.jobsListByExperimentOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. - * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listByExperimentWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets a list of Jobs within the specified Experiment. - * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. - * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {object} [options] Optional Parameters. 
+ listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + + /** + * Gets a list of File Servers associated with the specified workspace. * - * @param {object} [options.jobsListByExperimentOptions] Additional parameters - * for the operation + * @param {string} nextPageLink The NextLink from the previous successful call + * to List operation. * - * @param {number} [options.jobsListByExperimentOptions.maxResults] The maximum - * number of items to return in the response. A maximum of 1000 files can be - * returned. + * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -4199,7 +4381,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {JobListResult} - The deserialized result object. + * @resolve {FileServerListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -4207,20 +4389,29 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {JobListResult} [result] - The deserialized result object if an error did not occur. - * See {@link JobListResult} for more information. + * {FileServerListResult} [result] - The deserialized result object if an error did not occur. + * See {@link FileServerListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, options?: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? 
: { [headerName: string]: string; } }): Promise; - listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, callback: ServiceCallback): void; - listByExperiment(resourceGroupName: string, workspaceName: string, experimentName: string, options: { jobsListByExperimentOptions? : models.JobsListByExperimentOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; +} + +/** + * @class + * Clusters + * __NOTE__: An instance of this class is automatically created for an + * instance of the BatchAIManagementClient. + */ +export interface Clusters { /** - * Creates a Job in the given Experiment. + * Creates a Cluster in the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4229,286 +4420,189 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} parameters The parameters to provide for job creation. 
- * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters The parameters to provide for the Cluster + * creation. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. 
References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to - * download the container from. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. 
Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. * * @param {string} - * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. 
One of password or - * passwordSecretReference must be specified. + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. * - * @param {object} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. * * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id - * The ID of the resource + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK - * (aka Microsoft Cognitive Toolkit) job. - * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. - * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. - * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. 
+ * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for - * pyTorch job. + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. 
* - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. 
+ * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. 
If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be - * specified if configFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. - * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property - * - * @param {object} [parameters.caffe2Settings] Specifies the settings for - * Caffe2 job. - * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. 
- * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {object} [parameters.chainerSettings] Specifies the settings for - * Chainer job. - * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. - * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. - * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. - * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. - * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.horovodSettings] Specifies the settings for - * Horovod job. - * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. 
- * - * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. 
* - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {object} [parameters.constraints] Constraints associated with the - * Job. + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -4517,14 +4611,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. 
+ * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + createWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a Job in the given Experiment. + * Creates a Cluster in the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4533,286 +4627,189 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} parameters The parameters to provide for job creation. - * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). 
The name must be from 1 + * through 64 characters long. * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters The parameters to provide for the Cluster + * creation. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. 
+ * Manual scale settings for the cluster. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to - * download the container from. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. 
* - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. * * @param {string} - * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. * - * @param {object} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. 
* * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id - * The ID of the resource + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK - * (aka Microsoft Cognitive Toolkit) job. - * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. - * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. - * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. 
* - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for - * pyTorch job. + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. 
+ * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. 
* - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. 
If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be - * specified if configFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. - * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property - * - * @param {object} [parameters.caffe2Settings] Specifies the settings for - * Caffe2 job. - * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {object} [parameters.chainerSettings] Specifies the settings for - * Chainer job. - * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. 
- * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. - * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. - * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. - * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. - * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.horovodSettings] Specifies the settings for - * Horovod job. - * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. 
Value of the Azure Application Insights instrumentation + * key. * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. 
* - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {object} [parameters.constraints] Constraints associated with the - * Job. + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -4826,7 +4823,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {Job} - The deserialized result object. + * @resolve {Cluster} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -4834,20 +4831,20 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Job} [result] - The deserialized result object if an error did not occur. - * See {@link Job} for more information. + * {Cluster} [result] - The deserialized result object if an error did not occur. + * See {@link Cluster} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. 
* * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, callback: ServiceCallback): void; - create(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, callback: ServiceCallback): void; + create(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Deletes a Job. + * Updates properties of a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4856,30 +4853,57 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. 
Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * + * @param {object} [options.scaleSettings] Scale settings. Desired scale + * settings for the cluster. Batch AI service supports manual and auto scale + * clusters. + * + * @param {object} [options.scaleSettings.manual] Manual scale settings. Manual + * scale settings for the cluster. + * + * @param {number} options.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. + * + * @param {string} [options.scaleSettings.manual.nodeDeallocationOption] Node + * deallocation options. An action to be performed when the cluster size is + * decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' + * + * @param {object} [options.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. + * + * @param {number} options.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. + * + * @param {number} options.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. + * + * @param {number} [options.scaleSettings.autoScale.initialNodeCount] Initial + * node count. The number of compute nodes to allocate on cluster creation. 
+ * Note that this value is used only during cluster creation. Default: 0. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + updateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { scaleSettings? : models.ScaleSettings, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Deletes a Job. + * Updates properties of a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4888,17 +4912,44 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. 
* * @param {object} [options] Optional Parameters. * + * @param {object} [options.scaleSettings] Scale settings. Desired scale + * settings for the cluster. Batch AI service supports manual and auto scale + * clusters. + * + * @param {object} [options.scaleSettings.manual] Manual scale settings. Manual + * scale settings for the cluster. + * + * @param {number} options.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. + * + * @param {string} [options.scaleSettings.manual.nodeDeallocationOption] Node + * deallocation options. An action to be performed when the cluster size is + * decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' + * + * @param {object} [options.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. + * + * @param {number} options.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. + * + * @param {number} options.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. + * + * @param {number} [options.scaleSettings.autoScale.initialNodeCount] Initial + * node count. The number of compute nodes to allocate on cluster creation. + * Note that this value is used only during cluster creation. Default: 0. + * * @param {object} [options.customHeaders] Headers that will be added to the * request * @@ -4909,7 +4960,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {null} - The deserialized result object. + * @resolve {Cluster} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -4917,19 +4968,20 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {Cluster} [result] - The deserialized result object if an error did not occur. + * See {@link Cluster} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - deleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + update(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { scaleSettings? : models.ScaleSettings, customHeaders? : { [headerName: string]: string; } }): Promise; + update(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; + update(resourceGroupName: string, workspaceName: string, clusterName: string, options: { scaleSettings? : models.ScaleSettings, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets information about a Job. + * Deletes a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4938,14 +4990,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. 
Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -4954,14 +5002,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + deleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets information about a Job. + * Deletes a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -4970,14 +5018,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). 
The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -4991,7 +5035,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {Job} - The deserialized result object. + * @resolve {null} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -4999,22 +5043,19 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {Job} [result] - The deserialized result object if an error did not occur. - * See {@link Job} for more information. + * {null} [result] - The deserialized result object if an error did not occur. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - get(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; + deleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * List all directories and files inside the given directory of the Job's - * output directory (if the output directory is on Azure File Share or Azure - * Storage Container). + * Gets information about a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5023,30 +5064,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} jobsListOutputFilesOptions Additional parameters for the - * operation - * - * @param {string} jobsListOutputFilesOptions.outputdirectoryid Id of the job - * output directory. This is the OutputDirectory-->id parameter that is given - * by the user during Create Job. - * - * @param {string} [jobsListOutputFilesOptions.directory] The path to the - * directory. 
- * - * @param {number} [jobsListOutputFilesOptions.linkexpiryinminutes] The number - * of minutes after which the download link will expire. - * - * @param {number} [jobsListOutputFilesOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -5055,16 +5076,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listOutputFilesWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + getWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * List all directories and files inside the given directory of the Job's - * output directory (if the output directory is on Azure File Share or Azure - * Storage Container). + * Gets information about a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5073,30 +5092,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. 
Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} jobsListOutputFilesOptions Additional parameters for the - * operation - * - * @param {string} jobsListOutputFilesOptions.outputdirectoryid Id of the job - * output directory. This is the OutputDirectory-->id parameter that is given - * by the user during Create Job. - * - * @param {string} [jobsListOutputFilesOptions.directory] The path to the - * directory. - * - * @param {number} [jobsListOutputFilesOptions.linkexpiryinminutes] The number - * of minutes after which the download link will expire. - * - * @param {number} [jobsListOutputFilesOptions.maxResults] The maximum number - * of items to return in the response. A maximum of 1000 files can be returned. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -5110,7 +5109,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {FileListResult} - The deserialized result object. + * @resolve {Cluster} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -5118,22 +5117,20 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileListResult} for more information. 
+ * {Cluster} [result] - The deserialized result object if an error did not occur. + * See {@link Cluster} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, callback: ServiceCallback): void; - listOutputFiles(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, jobsListOutputFilesOptions: models.JobsListOutputFilesOptions, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + get(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; + get(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of currently existing nodes which were used for the Job - * execution. The returned information contains the node ID, its public IP and - * SSH port. + * Get the IP address, port of all the compute nodes in the Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5142,14 +5139,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). 
The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -5162,12 +5155,10 @@ export interface Jobs { * * @reject {Error|ServiceError} - The error object. */ - listRemoteLoginInformationWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listRemoteLoginInformationWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of currently existing nodes which were used for the Job - * execution. The returned information contains the node ID, its public IP and - * SSH port. + * Get the IP address, port of all the compute nodes in the Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5176,14 +5167,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. 
* - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -5213,13 +5200,13 @@ export interface Jobs { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; + listRemoteLoginInformation(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Terminates a job. + * Gets information about Clusters associated with the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5228,30 +5215,28 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} [options] Optional Parameters. * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {object} [options.clustersListByWorkspaceOptions] Additional + * parameters for the operation * - * @param {object} [options] Optional Parameters. + * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 files + * can be returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - terminateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise>; + listByWorkspaceWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, options?: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Terminates a job. + * Gets information about Clusters associated with the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5260,16 +5245,14 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} [options] Optional Parameters. * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {object} [options.clustersListByWorkspaceOptions] Additional + * parameters for the operation * - * @param {object} [options] Optional Parameters. + * @param {number} [options.clustersListByWorkspaceOptions.maxResults] The + * maximum number of items to return in the response. A maximum of 1000 files + * can be returned. * * @param {object} [options.customHeaders] Headers that will be added to the * request @@ -5281,7 +5264,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {null} - The deserialized result object. + * @resolve {ClusterListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. 
* @@ -5289,19 +5272,20 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {ClusterListResult} [result] - The deserialized result object if an error did not occur. + * See {@link ClusterListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - terminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options?: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }): Promise; + listByWorkspace(resourceGroupName: string, workspaceName: string, callback: ServiceCallback): void; + listByWorkspace(resourceGroupName: string, workspaceName: string, options: { clustersListByWorkspaceOptions? : models.ClustersListByWorkspaceOptions, customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Creates a Job in the given Experiment. + * Creates a Cluster in the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5310,286 +5294,189 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). 
The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} parameters The parameters to provide for job creation. - * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters The parameters to provide for the Cluster + * creation. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). 
Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. 
If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to - * download the container from. + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. 
* * @param {string} - * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. * - * @param {object} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. * * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id - * The ID of the resource + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK - * (aka Microsoft Cognitive Toolkit) job. - * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. - * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. 
- * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. 
The default value for this property - * is equal to nodeCount property + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for - * pyTorch job. + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. 
A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. 
This property is - * optional for single machine training. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be - * specified if configFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. 
- * - * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. - * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property - * - * @param {object} [parameters.caffe2Settings] Specifies the settings for - * Caffe2 job. - * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {object} [parameters.chainerSettings] Specifies the settings for - * Chainer job. - * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. - * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. - * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. - * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. 
- * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.horovodSettings] Specifies the settings for - * Horovod job. - * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. 
+ * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {object} [parameters.constraints] Constraints associated with the - * Job. + * @param {object} [parameters.subnet] Subnet. 
Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -5598,14 +5485,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginCreateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Creates a Job in the given Experiment. + * Creates a Cluster in the given Workspace. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -5614,369 +5501,189 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. 
Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. - * - * @param {object} parameters The parameters to provide for job creation. - * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' - * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. - * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. - * - * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. - * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. - * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. - * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes - * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. 
- * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to - * download the container from. - * - * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. - * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. - * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. - * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. - * - * @param {string} - * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. - * - * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. - * - * @param {object} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. - * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id - * The ID of the resource - * - * @param {string} - * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. - * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK - * (aka Microsoft Cognitive Toolkit) job. - * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. 
Valid values are 'BrainScript' or 'Python'. - * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. - * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. - * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. - * - * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. - * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property - * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for - * pyTorch job. - * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. - * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. - * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. - * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. 
- * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. - * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. - * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. - * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. - * - * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. 
This property cannot be - * specified if configFilePath is specified. - * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. - * - * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. - * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property - * - * @param {object} [parameters.caffe2Settings] Specifies the settings for - * Caffe2 job. - * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {object} [parameters.chainerSettings] Specifies the settings for - * Chainer job. - * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. - * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. - * - * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script - * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property - * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. 
- * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} parameters The parameters to provide for the Cluster + * creation. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.vmSize VM size. The size of the virtual machines + * in the cluster. All nodes in a cluster have the same VM size. For + * information about available VM sizes for clusters using images from the + * Virtual Machines Marketplace see Sizes for Virtual Machines (Linux). Batch + * AI service supports all Azure VM sizes except STANDARD_A0 and those with + * premium storage (STANDARD_GS, STANDARD_DS, and STANDARD_DSV2 series). * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property + * @param {string} [parameters.vmPriority] VM priority. VM priority. Allowed + * values are: dedicated (default) and lowpriority. Possible values include: + * 'dedicated', 'lowpriority' * - * @param {object} [parameters.horovodSettings] Specifies the settings for - * Horovod job. + * @param {object} [parameters.scaleSettings] Scale settings. Scale settings + * for the cluster. Batch AI service supports manual and auto scale clusters. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. 
+ * @param {object} [parameters.scaleSettings.manual] Manual scale settings. + * Manual scale settings for the cluster. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {number} parameters.scaleSettings.manual.targetNodeCount Target node + * count. The desired number of compute nodes in the Cluster. Default is 0. * - * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * @param {string} [parameters.scaleSettings.manual.nodeDeallocationOption] + * Node deallocation options. An action to be performed when the cluster size + * is decreasing. The default value is requeue. Possible values include: + * 'requeue', 'terminate', 'waitforjobcompletion' * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for - * this property is equal to nodeCount property + * @param {object} [parameters.scaleSettings.autoScale] Auto-scale settings. + * Auto-scale settings for the cluster. * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {number} parameters.scaleSettings.autoScale.minimumNodeCount Minimum + * node count. The minimum number of compute nodes the Batch AI service will + * try to allocate for the cluster. Note, the actual number of nodes can be + * less than the specified value if the subscription has not enough quota to + * fulfill the request. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. 
+ * @param {number} parameters.scaleSettings.autoScale.maximumNodeCount Maximum + * node count. The maximum number of compute nodes the cluster can have. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {number} [parameters.scaleSettings.autoScale.initialNodeCount] + * Initial node count. The number of compute nodes to allocate on cluster + * creation. Note that this value is used only during cluster creation. + * Default: 0. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {object} [parameters.virtualMachineConfiguration] VM configuration. + * OS image configuration for cluster nodes. All nodes in a cluster have the + * same OS image. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {object} [parameters.virtualMachineConfiguration.imageReference] + * Image reference. OS image reference for cluster nodes. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {string} + * parameters.virtualMachineConfiguration.imageReference.publisher Publisher. + * Publisher of the image. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {string} parameters.virtualMachineConfiguration.imageReference.offer + * Offer. Offer of the image. * - * @param {object} [parameters.constraints] Constraints associated with the - * Job. + * @param {string} parameters.virtualMachineConfiguration.imageReference.sku + * SKU. SKU of the image. 
* - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.version] Version. + * Version of the image. * - * @param {object} [options] Optional Parameters. + * @param {string} + * [parameters.virtualMachineConfiguration.imageReference.virtualMachineImageId] + * Custom VM image resource ID. The ARM resource identifier of the virtual + * machine image for the compute nodes. This is of the form + * /subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Compute/images/{imageName}. + * The virtual machine image must be in the same region and subscription as the + * cluster. For information about the firewall settings for the Batch node + * agent to communicate with the Batch service see + * https://docs.microsoft.com/en-us/azure/batch/batch-api-basics#virtual-network-vnet-and-firewall-configuration. + * Note, you need to provide publisher, offer and sku of the base OS image of + * which the custom image has been derived from. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} [parameters.nodeSetup] Node setup. Setup to be performed on + * each compute node in the cluster. * - * @param {ServiceCallback} [optionalCallback] - The optional callback. + * @param {object} [parameters.nodeSetup.setupTask] Setup task. Setup task to + * run on cluster nodes when nodes got created or rebooted. The setup task code + * needs to be idempotent. Generally the setup task is used to download static + * data that is required for all jobs that run on the cluster VMs and/or to + * download/install software. * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. + * @param {string} parameters.nodeSetup.setupTask.commandLine Command line. 
The + * command line to be executed on each cluster's node after it being allocated + * or rebooted. The command is executed in a bash subshell as a root. * - * {Promise} A promise is returned. + * @param {array} [parameters.nodeSetup.setupTask.environmentVariables] + * Environment variables. A collection of user defined environment variables to + * be set for setup task. * - * @resolve {Job} - The deserialized result object. + * @param {array} [parameters.nodeSetup.setupTask.secrets] Secrets. A + * collection of user defined environment variables with secret values to be + * set for the setup task. Server will never report values of these variables + * back. * - * @reject {Error|ServiceError} - The error object. + * @param {string} parameters.nodeSetup.setupTask.stdOutErrPathPrefix Output + * path prefix. The prefix of a path where the Batch AI service will upload the + * stdout, stderr and execution log of the setup task. * - * {ServiceCallback} optionalCallback(err, result, request, response) + * @param {object} [parameters.nodeSetup.mountVolumes] Mount volumes. Mount + * volumes to be available to setup task and all jobs executing on the cluster. + * The volumes will be mounted at location specified by $AZ_BATCHAI_MOUNT_ROOT + * environment variable. * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. + * @param {array} [parameters.nodeSetup.mountVolumes.azureFileShares] Azure + * File Shares. A collection of Azure File Shares that are to be mounted to the + * cluster nodes. * - * {Job} [result] - The deserialized result object if an error did not occur. - * See {@link Job} for more information. + * @param {array} [parameters.nodeSetup.mountVolumes.azureBlobFileSystems] + * Azure Blob file systems. A collection of Azure Blob Containers that are to + * be mounted to the cluster nodes. * - * {WebResource} [request] - The HTTP Request object if an error did not occur. 
+ * @param {array} [parameters.nodeSetup.mountVolumes.fileServers] File Servers. + * A collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, callback: ServiceCallback): void; - beginCreate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, parameters: models.JobCreateParameters, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Deletes a Job. + * @param {array} [parameters.nodeSetup.mountVolumes.unmanagedFileSystems] + * Unmanaged file systems. A collection of unmanaged file systems that are to + * be mounted to the cluster nodes. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {object} [parameters.nodeSetup.performanceCountersSettings] + * Performance counters settings. Settings for performance counters collecting + * and uploading. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference Azure + * Application Insights reference. Azure Application Insights information for + * performance counters reporting. If provided, Batch AI will upload node + * performance counters to the corresponding Azure Application Insights + * account. 
* - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.component + * Component ID. Azure Application Insights component resource ID. * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKey] + * Instrumentation Key. Value of the Azure Application Insights instrumentation + * key. * - * @param {object} [options] Optional Parameters. + * @param {object} + * [parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference] + * Instrumentation key KeyVault Secret reference. KeyVault Store and Secret + * which contains Azure Application Insights instrumentation key. One of + * instrumentationKey or instrumentationKeySecretReference must be specified. * - * @param {object} [options.customHeaders] Headers that will be added to the - * request + * @param {object} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.sourceVault + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. * - * @returns {Promise} A promise is returned + * @param {string} + * parameters.nodeSetup.performanceCountersSettings.appInsightsReference.instrumentationKeySecretReference.secretUrl + * Secret URL. The URL referencing a secret in the Key Vault. * - * @resolve {HttpOperationResponse} - The deserialized result object. 
+ * @param {object} parameters.userAccountSettings User account settings. + * Settings for an administrator user account that will be created on each + * compute node in the cluster. * - * @reject {Error|ServiceError} - The error object. - */ - beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Deletes a Job. + * @param {string} parameters.userAccountSettings.adminUserName User name. Name + * of the administrator user account which can be used to SSH to nodes. * - * @param {string} resourceGroupName Name of the resource group to which the - * resource belongs. + * @param {string} [parameters.userAccountSettings.adminUserSshPublicKey] SSH + * public key. SSH public key of the administrator user account. * - * @param {string} workspaceName The name of the workspace. Workspace names can - * only contain a combination of alphanumeric characters along with dash (-) - * and underscore (_). The name must be from 1 through 64 characters long. + * @param {string} [parameters.userAccountSettings.adminUserPassword] Password. + * Password of the administrator user account. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. + * @param {object} [parameters.subnet] Subnet. Existing virtual network subnet + * to put the cluster nodes in. Note, if a File Server mount configured in node + * setup, the File Server's subnet will be used automatically. * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. 
+ * @param {string} parameters.subnet.id The ID of the resource * * @param {object} [options] Optional Parameters. * @@ -5990,7 +5697,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {null} - The deserialized result object. + * @resolve {Cluster} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -5998,19 +5705,20 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {null} [result] - The deserialized result object if an error did not occur. + * {Cluster} [result] - The deserialized result object if an error did not occur. + * See {@link Cluster} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - beginDeleteMethod(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, callback: ServiceCallback): void; + beginCreate(resourceGroupName: string, workspaceName: string, clusterName: string, parameters: models.ClusterCreateParameters, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Terminates a job. + * Deletes a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. @@ -6019,14 +5727,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -6039,10 +5743,10 @@ export interface Jobs { * * @reject {Error|ServiceError} - The error object. */ - beginTerminateWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + beginDeleteMethodWithHttpOperationResponse(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Terminates a job. + * Deletes a Cluster. * * @param {string} resourceGroupName Name of the resource group to which the * resource belongs. 
@@ -6051,14 +5755,10 @@ export interface Jobs { * only contain a combination of alphanumeric characters along with dash (-) * and underscore (_). The name must be from 1 through 64 characters long. * - * @param {string} experimentName The name of the experiment. Experiment names - * can only contain a combination of alphanumeric characters along with dash - * (-) and underscore (_). The name must be from 1 through 64 characters long. - * - * @param {string} jobName The name of the job within the specified resource - * group. Job names can only contain a combination of alphanumeric characters - * along with dash (-) and underscore (_). The name must be from 1 through 64 - * characters long. + * @param {string} clusterName The name of the cluster within the specified + * resource group. Cluster names can only contain a combination of alphanumeric + * characters along with dash (-) and underscore (_). The name must be from 1 + * through 64 characters long. * * @param {object} [options] Optional Parameters. * @@ -6086,72 +5786,13 @@ export interface Jobs { * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, callback: ServiceCallback): void; - beginTerminate(resourceGroupName: string, workspaceName: string, experimentName: string, jobName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; - - - /** - * Gets a list of Jobs within the specified Experiment. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. 
- * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @returns {Promise} A promise is returned - * - * @resolve {HttpOperationResponse} - The deserialized result object. - * - * @reject {Error|ServiceError} - The error object. - */ - listByExperimentNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; - - /** - * Gets a list of Jobs within the specified Experiment. - * - * @param {string} nextPageLink The NextLink from the previous successful call - * to List operation. - * - * @param {object} [options] Optional Parameters. - * - * @param {object} [options.customHeaders] Headers that will be added to the - * request - * - * @param {ServiceCallback} [optionalCallback] - The optional callback. - * - * @returns {ServiceCallback|Promise} If a callback was passed as the last - * parameter then it returns the callback else returns a Promise. - * - * {Promise} A promise is returned. - * - * @resolve {JobListResult} - The deserialized result object. - * - * @reject {Error|ServiceError} - The error object. - * - * {ServiceCallback} optionalCallback(err, result, request, response) - * - * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. - * - * {JobListResult} [result] - The deserialized result object if an error did not occur. - * See {@link JobListResult} for more information. - * - * {WebResource} [request] - The HTTP Request object if an error did not occur. - * - * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. - */ - listByExperimentNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listByExperimentNext(nextPageLink: string, callback: ServiceCallback): void; - listByExperimentNext(nextPageLink: string, options: { customHeaders? 
: { [headerName: string]: string; } }, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, callback: ServiceCallback): void; + beginDeleteMethod(resourceGroupName: string, workspaceName: string, clusterName: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * List all directories and files inside the given directory of the Job's - * output directory (if the output directory is on Azure File Share or Azure - * Storage Container). + * Get the IP address, port of all the compute nodes in the Cluster. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -6163,16 +5804,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listOutputFilesNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listRemoteLoginInformationNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * List all directories and files inside the given directory of the Job's - * output directory (if the output directory is on Azure File Share or Azure - * Storage Container). + * Get the IP address, port of all the compute nodes in the Cluster. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -6189,7 +5828,7 @@ export interface Jobs { * * {Promise} A promise is returned. 
* - * @resolve {FileListResult} - The deserialized result object. + * @resolve {RemoteLoginInformationListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -6197,22 +5836,21 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {FileListResult} [result] - The deserialized result object if an error did not occur. - * See {@link FileListResult} for more information. + * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. + * See {@link RemoteLoginInformationListResult} for more + * information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listOutputFilesNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; - listOutputFilesNext(nextPageLink: string, callback: ServiceCallback): void; - listOutputFilesNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listRemoteLoginInformationNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listRemoteLoginInformationNext(nextPageLink: string, callback: ServiceCallback): void; + listRemoteLoginInformationNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; /** - * Gets a list of currently existing nodes which were used for the Job - * execution. The returned information contains the node ID, its public IP and - * SSH port. + * Gets information about Clusters associated with the given Workspace. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. 
@@ -6224,16 +5862,14 @@ export interface Jobs { * * @returns {Promise} A promise is returned * - * @resolve {HttpOperationResponse} - The deserialized result object. + * @resolve {HttpOperationResponse} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. */ - listRemoteLoginInformationNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; + listByWorkspaceNextWithHttpOperationResponse(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise>; /** - * Gets a list of currently existing nodes which were used for the Job - * execution. The returned information contains the node ID, its public IP and - * SSH port. + * Gets information about Clusters associated with the given Workspace. * * @param {string} nextPageLink The NextLink from the previous successful call * to List operation. @@ -6250,7 +5886,7 @@ export interface Jobs { * * {Promise} A promise is returned. * - * @resolve {RemoteLoginInformationListResult} - The deserialized result object. + * @resolve {ClusterListResult} - The deserialized result object. * * @reject {Error|ServiceError} - The error object. * @@ -6258,15 +5894,14 @@ export interface Jobs { * * {Error|ServiceError} err - The Error object if an error occurred, null otherwise. * - * {RemoteLoginInformationListResult} [result] - The deserialized result object if an error did not occur. - * See {@link RemoteLoginInformationListResult} for more - * information. + * {ClusterListResult} [result] - The deserialized result object if an error did not occur. + * See {@link ClusterListResult} for more information. * * {WebResource} [request] - The HTTP Request object if an error did not occur. * * {http.IncomingMessage} [response] - The HTTP Response stream if an error did not occur. */ - listRemoteLoginInformationNext(nextPageLink: string, options?: { customHeaders? 
: { [headerName: string]: string; } }): Promise; - listRemoteLoginInformationNext(nextPageLink: string, callback: ServiceCallback): void; - listRemoteLoginInformationNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options?: { customHeaders? : { [headerName: string]: string; } }): Promise; + listByWorkspaceNext(nextPageLink: string, callback: ServiceCallback): void; + listByWorkspaceNext(nextPageLink: string, options: { customHeaders? : { [headerName: string]: string; } }, callback: ServiceCallback): void; } diff --git a/lib/services/batchaiManagement/lib/operations/index.js b/lib/services/batchaiManagement/lib/operations/index.js index 234120bcdd..6f423c7864 100644 --- a/lib/services/batchaiManagement/lib/operations/index.js +++ b/lib/services/batchaiManagement/lib/operations/index.js @@ -16,8 +16,8 @@ exports.Operations = require('./operations'); exports.Usages = require('./usages'); -exports.Clusters = require('./clusters'); -exports.FileServers = require('./fileServers'); exports.Workspaces = require('./workspaces'); exports.Experiments = require('./experiments'); exports.Jobs = require('./jobs'); +exports.FileServers = require('./fileServers'); +exports.Clusters = require('./clusters'); diff --git a/lib/services/batchaiManagement/lib/operations/jobs.js b/lib/services/batchaiManagement/lib/operations/jobs.js index 8c1e523aeb..ea6acacff0 100644 --- a/lib/services/batchaiManagement/lib/operations/jobs.js +++ b/lib/services/batchaiManagement/lib/operations/jobs.js @@ -249,72 +249,76 @@ function _listByExperiment(resourceGroupName, workspaceName, experimentName, opt * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. 
- * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. 
A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. 
+ * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -322,202 +326,212 @@ function _listByExperiment(resourceGroupName, workspaceName, experimentName, opt * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. 
* - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. + * + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. 
* * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. 
The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. 
Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. 
If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. 
The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. 
* - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. 
* - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. 
* - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. * * @param {object} [parameters.constraints] Constraints associated with the * Job. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. 
* @@ -1469,72 +1483,76 @@ function _terminate(resourceGroupName, workspaceName, experimentName, jobName, o * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. 
+ * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. 
* - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. 
* * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -1542,202 +1560,212 @@ function _terminate(resourceGroupName, workspaceName, experimentName, jobName, o * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. + * + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. 
This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. 
+ * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. 
Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. 
+ * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. 
* - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
* * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. 
The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. 
+ * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. * * @param {object} [parameters.constraints] Constraints associated with the * Job. 
 * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -2884,72 +2912,76 @@ class Jobs { * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' + * + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. + * + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. + * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. - * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. - * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. + * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. 
+ * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. * - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. 
Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. One of password or passwordSecretReference must be + * specified. 
 * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -2957,202 +2989,212 @@ class Jobs { * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. + + * + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. * - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. 
This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
* - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
* - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. 
This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. 
The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. 
The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. 
If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderr and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. 
* * @param {object} [parameters.constraints] Constraints associated with the * Job. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -3200,72 +3242,76 @@ class Jobs { * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. 
+ * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. 
* - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. 
One of password or passwordSecretReference must be + * specified. * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource identifier of the + * Key Vault. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -3273,202 +3319,212 @@ class Jobs { * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. + * + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. 
* - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. 
+ * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. 
The path to the Python interpreter. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. 
This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. 
The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. 
The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. 
If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderr and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. 
* * @param {object} [parameters.constraints] Constraints associated with the * Job. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -4127,72 +4183,76 @@ class Jobs { * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmounted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. 
+ * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. 
* - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. 
One of password or passwordSecretReference must be + * specified. * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -4200,202 +4260,212 @@ class Jobs { * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. + * + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. 
* - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. 
+ * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. 
The path to the Python interpreter. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. 
This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. 
The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. 
The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. 
If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. 
* * @param {object} [parameters.constraints] Constraints associated with the * Job. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * @@ -4443,72 +4513,76 @@ class Jobs { * * @param {object} parameters The parameters to provide for job creation. * - * @param {string} [parameters.schedulingPriority] Scheduling priority - * associated with the job. Scheduling priority associated with the job. - * Possible values include: 'low', 'normal', 'high' + * @param {string} [parameters.schedulingPriority] Scheduling priority. + * Scheduling priority associated with the job. Possible values: low, normal, + * high. Possible values include: 'low', 'normal', 'high' * - * @param {object} parameters.cluster Specifies the Id of the cluster on which - * this job will run. + * @param {object} parameters.cluster Cluster. Resource ID of the cluster on + * which this job will run. * - * @param {object} [parameters.mountVolumes] Information on mount volumes to be - * used by the job. These volumes will be mounted before the job execution and - * will be unmouted after the job completion. The volumes will be mounted at - * location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT environment variable. + * @param {object} [parameters.mountVolumes] Mount volumes. Information on + * mount volumes to be used by the job. These volumes will be mounted before + * the job execution and will be unmouted after the job completion. The volumes + * will be mounted at location specified by $AZ_BATCHAI_JOB_MOUNT_ROOT + * environment variable. * - * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Share - * setup configuration. References to Azure File Shares that are to be mounted - * to the cluster nodes. 
+ * @param {array} [parameters.mountVolumes.azureFileShares] Azure File Shares. + * A collection of Azure File Shares that are to be mounted to the cluster + * nodes. * * @param {array} [parameters.mountVolumes.azureBlobFileSystems] Azure Blob - * FileSystem setup configuration. References to Azure Blob FUSE that are to be - * mounted to the cluster nodes. + * file systems. A collection of Azure Blob Containers that are to be mounted + * to the cluster nodes. * - * @param {array} [parameters.mountVolumes.fileServers] References to a list of - * file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.fileServers] File Servers. A + * collection of Batch AI File Servers that are to be mounted to the cluster + * nodes. * - * @param {array} [parameters.mountVolumes.unmanagedFileSystems] References to - * a list of file servers that are mounted to the cluster node. + * @param {array} [parameters.mountVolumes.unmanagedFileSystems] Unmanaged file + * systems. A collection of unmanaged file systems that are to be mounted to + * the cluster nodes. * - * @param {number} parameters.nodeCount Number of compute nodes to run the job - * on. The job will be gang scheduled on that many compute nodes + * @param {number} parameters.nodeCount Node count. Number of compute nodes to + * run the job on. The job will be gang scheduled on that many compute nodes. * - * @param {object} [parameters.containerSettings] If provided the job will run - * in the specified container. If the container was downloaded as part of - * cluster setup then the same container image will be used. If not provided, - * the job will run on the VM. + * @param {object} [parameters.containerSettings] Container settings. Docker + * container settings for the job. If not provided, the job will run directly + * on the node. 
* - * @param {object} parameters.containerSettings.imageSourceRegistry Registry to + * @param {object} parameters.containerSettings.imageSourceRegistry Image + * source registry. Information about docker image and docker registry to * download the container from. * * @param {string} [parameters.containerSettings.imageSourceRegistry.serverUrl] - * URL for image repository. + * Server URL. URL for image repository. * - * @param {string} parameters.containerSettings.imageSourceRegistry.image The - * name of the image in image repository. + * @param {string} parameters.containerSettings.imageSourceRegistry.image + * Image. The name of the image in the image repository. * * @param {object} - * [parameters.containerSettings.imageSourceRegistry.credentials] Information - * to access the private Docker repository. + * [parameters.containerSettings.imageSourceRegistry.credentials] Credentials. + * Credentials to access the private docker repository. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.username User - * name to login. + * name. User name to login to the repository. * * @param {string} * [parameters.containerSettings.imageSourceRegistry.credentials.password] - * Password to login. One of password or passwordSecretReference must be - * specified. + * Password. User password to login to the docker repository. One of password + * or passwordSecretReference must be specified. * * @param {object} * [parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference] - * Specifies the location of the password, which is a Key Vault Secret. Users - * can store their secrets in Azure KeyVault and pass it to the Batch AI - * Service to integrate with KeyVault. One of password or - * passwordSecretReference must be specified. + * Password secret reference. KeyVault Secret storing the password. Users can + * store their secrets in Azure KeyVault and pass it to the Batch AI service to + * integrate with KeyVault. 
One of password or passwordSecretReference must be + * specified. * * @param {object} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault - * Fully qualified resource Id for the Key Vault. + * Key Vault resource identifier. Fully qualified resource indentifier of the + * Key Vault. * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.sourceVault.id @@ -4516,202 +4590,212 @@ class Jobs { * * @param {string} * parameters.containerSettings.imageSourceRegistry.credentials.passwordSecretReference.secretUrl - * The URL referencing a secret in a Key Vault. + * Secret URL. The URL referencing a secret in the Key Vault. + * + * @param {string} [parameters.containerSettings.shmSize] /dev/shm size. Size + * of /dev/shm. Please refer to docker documentation for supported argument + * formats. * - * @param {object} [parameters.cntkSettings] Specifies the settings for CNTK + * @param {object} [parameters.cntkSettings] CNTK settings. Settings for CNTK * (aka Microsoft Cognitive Toolkit) job. * - * @param {string} [parameters.cntkSettings.languageType] Specifies the - * language type to use for launching CNTK (aka Microsoft Cognitive Toolkit) - * job. Valid values are 'BrainScript' or 'Python'. + * @param {string} [parameters.cntkSettings.languageType] Language type. The + * language to use for launching CNTK (aka Microsoft Cognitive Toolkit) job. + * Valid values are 'BrainScript' or 'Python'. * - * @param {string} [parameters.cntkSettings.configFilePath] Specifies the path - * of the config file. This property can be specified only if the languageType - * is 'BrainScript'. + * @param {string} [parameters.cntkSettings.configFilePath] Config file path. + * Specifies the path of the BrainScript config file. This property can be + * specified only if the languageType is 'BrainScript'. 
* - * @param {string} [parameters.cntkSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property can be - * specified only if the languageType is 'Python'. + * @param {string} [parameters.cntkSettings.pythonScriptFilePath] Python script + * file path. Python script to execute. This property can be specified only if + * the languageType is 'Python'. * - * @param {string} [parameters.cntkSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the languageType - * is 'Python'. + * @param {string} [parameters.cntkSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. This property can be + * specified only if the languageType is 'Python'. * * @param {string} [parameters.cntkSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script or CNTK.exe. + * arguments. Command line arguments that need to be passed to the python + * script or cntk executable. * - * @param {number} [parameters.cntkSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.cntkSettings.processCount] Process count. Number + * of processes to launch for the job execution. The default value for this + * property is equal to nodeCount property * - * @param {object} [parameters.pyTorchSettings] Specifies the settings for + * @param {object} [parameters.pyTorchSettings] pyTorch settings. Settings for * pyTorch job. * - * @param {string} parameters.pyTorchSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.pyTorchSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] The path - * to python interpreter. 
+ * @param {string} [parameters.pyTorchSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * - * @param {string} [parameters.pyTorchSettings.commandLineArgs] Specifies the - * command line arguments for the master task. + * @param {string} [parameters.pyTorchSettings.commandLineArgs] Command line + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.pyTorchSettings.processCount] Number of - * processes to launch for the job execution. The default value for this - * property is equal to nodeCount property. + * @param {number} [parameters.pyTorchSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {string} [parameters.pyTorchSettings.communicationBackend] Type of - * the communication backend for distributed jobs. Valid values are 'TCP', - * 'Gloo' or 'MPI'. Not required for non-distributed jobs. + * @param {string} [parameters.pyTorchSettings.communicationBackend] + * Communication backend. Type of the communication backend for distributed + * jobs. Valid values are 'TCP', 'Gloo' or 'MPI'. Not required for + * non-distributed jobs. * - * @param {object} [parameters.tensorFlowSettings] Specifies the settings for - * Tensor Flow job. + * @param {object} [parameters.tensorFlowSettings] TensorFlow settings. + * Settings for Tensor Flow job. * - * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath The path - * and file name of the python script to execute the job. + * @param {string} parameters.tensorFlowSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] The - * path to python interpreter. + * @param {string} [parameters.tensorFlowSettings.pythonInterpreterPath] Python + * interpreter path. 
The path to the Python interpreter. * - * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] - * Specifies the command line arguments for the master task. + * @param {string} [parameters.tensorFlowSettings.masterCommandLineArgs] Master + * command line arguments. Command line arguments that need to be passed to the + * python script for the master task. * - * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] - * Specifies the command line arguments for the worker task. This property is - * optional for single machine training. + * @param {string} [parameters.tensorFlowSettings.workerCommandLineArgs] Worker + * command line arguments. Command line arguments that need to be passed to the + * python script for the worker task. Optional for single process jobs. * * @param {string} - * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Specifies the - * command line arguments for the parameter server task. This property is - * optional for single machine training. - * - * @param {number} [parameters.tensorFlowSettings.workerCount] The number of - * worker tasks. If specified, the value must be less than or equal to - * (nodeCount * numberOfGPUs per VM). If not specified, the default value is - * equal to nodeCount. This property can be specified only for distributed - * TensorFlow training - * - * @param {number} [parameters.tensorFlowSettings.parameterServerCount] The - * number of parmeter server tasks. If specified, the value must be less than - * or equal to nodeCount. If not specified, the default value is equal to 1 for - * distributed TensorFlow training (This property is not applicable for single - * machine training). This property can be specified only for distributed - * TensorFlow training. - * - * @param {object} [parameters.caffeSettings] Specifies the settings for Caffe - * job. - * - * @param {string} [parameters.caffeSettings.configFilePath] Specifies the path - * of the config file. 
This property cannot be specified if + * [parameters.tensorFlowSettings.parameterServerCommandLineArgs] Parameter + * server command line arguments. Command line arguments that need to be passed + * to the python script for the parameter server. Optional for single process + * jobs. + * + * @param {number} [parameters.tensorFlowSettings.workerCount] Worker count. + * The number of worker tasks. If specified, the value must be less than or + * equal to (nodeCount * numberOfGPUs per VM). If not specified, the default + * value is equal to nodeCount. This property can be specified only for + * distributed TensorFlow training. + * + * @param {number} [parameters.tensorFlowSettings.parameterServerCount] + * Parameter server count. The number of parameter server tasks. If specified, + * the value must be less than or equal to nodeCount. If not specified, the + * default value is equal to 1 for distributed TensorFlow training. This + * property can be specified only for distributed TensorFlow training. + * + * @param {object} [parameters.caffeSettings] Caffe settings. Settings for + * Caffe job. + * + * @param {string} [parameters.caffeSettings.configFilePath] Config file path. + * Path of the config file for the job. This property cannot be specified if * pythonScriptFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonScriptFilePath] The path and - * file name of the python script to execute the job. This property cannot be + * @param {string} [parameters.caffeSettings.pythonScriptFilePath] Python + * script file path. Python script to execute. This property cannot be * specified if configFilePath is specified. * - * @param {string} [parameters.caffeSettings.pythonInterpreterPath] The path to - * python interpreter. This property can be specified only if the - * pythonScriptFilePath is specified. + * @param {string} [parameters.caffeSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. 
The property can be + * specified only if the pythonScriptFilePath is specified. * * @param {string} [parameters.caffeSettings.commandLineArgs] Command line - * arguments that needs to be passed to the Caffe job. + * arguments. Command line arguments that need to be passed to the Caffe job. * - * @param {number} [parameters.caffeSettings.processCount] Number of processes - * parameter that is passed to MPI runtime. The default value for this property - * is equal to nodeCount property + * @param {number} [parameters.caffeSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for + * this property is equal to nodeCount property * - * @param {object} [parameters.caffe2Settings] Specifies the settings for + * @param {object} [parameters.caffe2Settings] Caffe2 settings. Settings for * Caffe2 job. * - * @param {string} parameters.caffe2Settings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.caffe2Settings.pythonScriptFilePath Python script + * file path. The python script to execute. * - * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.caffe2Settings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.caffe2Settings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {object} [parameters.chainerSettings] Specifies the settings for + * @param {object} [parameters.chainerSettings] Chainer settings. Settings for * Chainer job. * - * @param {string} parameters.chainerSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.chainerSettings.pythonScriptFilePath Python + * script file path. 
The python script to execute. * - * @param {string} [parameters.chainerSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.chainerSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.chainerSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.chainerSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.chainerSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.customToolkitSettings] Specifies the settings - * for custom tool kit job. + * @param {object} [parameters.customToolkitSettings] Custom tool kit job. + * Settings for custom tool kit job. * - * @param {string} [parameters.customToolkitSettings.commandLine] The command - * line to execute the custom toolkit Job. + * @param {string} [parameters.customToolkitSettings.commandLine] Command line. + * The command line to execute on the master node. * - * @param {object} [parameters.customMpiSettings] Specifies the settings for - * custom MPI job. + * @param {object} [parameters.customMpiSettings] Custom MPI settings. Settings + * for custom MPI job. * - * @param {string} parameters.customMpiSettings.commandLine The program and - * program command line parameters to be executed by mpi runtime. + * @param {string} parameters.customMpiSettings.commandLine Command line. The + * command line to be executed by mpi runtime on each compute node. * - * @param {number} [parameters.customMpiSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. 
The default value for + * @param {number} [parameters.customMpiSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.horovodSettings] Specifies the settings for + * @param {object} [parameters.horovodSettings] Horovod settings. Settings for * Horovod job. * - * @param {string} parameters.horovodSettings.pythonScriptFilePath The path and - * file name of the python script to execute the job. + * @param {string} parameters.horovodSettings.pythonScriptFilePath Python + * script file path. The python script to execute. * - * @param {string} [parameters.horovodSettings.pythonInterpreterPath] The path - * to python interpreter. + * @param {string} [parameters.horovodSettings.pythonInterpreterPath] Python + * interpreter path. The path to the Python interpreter. * * @param {string} [parameters.horovodSettings.commandLineArgs] Command line - * arguments that needs to be passed to the python script + * arguments. Command line arguments that need to be passed to the python + * script. * - * @param {number} [parameters.horovodSettings.processCount] Number of - * processes parameter that is passed to MPI runtime. The default value for + * @param {number} [parameters.horovodSettings.processCount] Process count. + * Number of processes to launch for the job execution. The default value for * this property is equal to nodeCount property * - * @param {object} [parameters.jobPreparation] Specifies the command line to be - * executed before tool kit is launched. The specified actions will run on all - * the nodes that are part of the job + * @param {object} [parameters.jobPreparation] Job preparation. A command line + * to be executed on each node allocated for the job before tool kit is + * launched. * - * @param {string} parameters.jobPreparation.commandLine The command line to - * execute. 
If containerSettings is specified on the job, this commandLine will - * be executed in the same container as job. Otherwise it will be executed on - * the node. + * @param {string} parameters.jobPreparation.commandLine Command line. The + * command line to execute. If containerSettings is specified on the job, this + * commandLine will be executed in the same container as job. Otherwise it will + * be executed on the node. * - * @param {string} parameters.stdOutErrPathPrefix The path where the Batch AI - * service will upload stdout and stderror of the job. + * @param {string} parameters.stdOutErrPathPrefix Standard output path prefix. + * The path where the Batch AI service will store stdout, stderror and + * execution log of the job. * - * @param {array} [parameters.inputDirectories] Specifies the list of input - * directories for the Job. + * @param {array} [parameters.inputDirectories] Input directories. A list of + * input directories for the job. * - * @param {array} [parameters.outputDirectories] Specifies the list of output - * directories. + * @param {array} [parameters.outputDirectories] Output directories. A list of + * output directories for the job. * - * @param {array} [parameters.environmentVariables] Additional environment - * variables to set on the job. Batch AI will setup these additional - * environment variables for the job. + * @param {array} [parameters.environmentVariables] Environment variables. A + * list of user defined environment variables which will be setup for the job. * - * @param {array} [parameters.secrets] Additional environment variables with - * secret values to set on the job. Batch AI will setup these additional - * environment variables for the job. Server will never report values of these - * variables back. + * @param {array} [parameters.secrets] Secrets. A list of user defined + * environment variables with secret values which will be setup for the job. + * Server will never report values of these variables back. 
* * @param {object} [parameters.constraints] Constraints associated with the * Job. * - * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max time - * the job can run. Default Value = 1 week. + * @param {moment.duration} [parameters.constraints.maxWallClockTime] Max wall + * clock time. Max time the job can run. Default value: 1 week. * * @param {object} [options] Optional Parameters. * diff --git a/lib/services/batchaiManagement/lib/operations/workspaces.js b/lib/services/batchaiManagement/lib/operations/workspaces.js index 129c3deb2b..e77df53759 100644 --- a/lib/services/batchaiManagement/lib/operations/workspaces.js +++ b/lib/services/batchaiManagement/lib/operations/workspaces.js @@ -352,12 +352,12 @@ function _listByResourceGroup(resourceGroupName, options, callback) { * * @param {object} parameters Workspace creation parameters. * - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. * + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -428,6 +428,204 @@ function _create(resourceGroupName, workspaceName, parameters, options, callback }); } +/** + * Updates properties of a Workspace. + * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {object} [options] Optional Parameters. + * + * @param {object} [options.tags] Tags. 
The user specified tags associated with + * the Workspace. + * + * @param {object} [options.customHeaders] Headers that will be added to the + * request + * + * @param {function} callback - The callback. + * + * @returns {function} callback(err, result, request, response) + * + * {Error} err - The Error object if an error occurred, null otherwise. + * + * {object} [result] - The deserialized result object if an error did not occur. + * See {@link Workspace} for more information. + * + * {object} [request] - The HTTP Request object if an error did not occur. + * + * {stream} [response] - The HTTP Response stream if an error did not occur. + */ +function _update(resourceGroupName, workspaceName, options, callback) { + /* jshint validthis: true */ + let client = this.client; + if(!callback && typeof options === 'function') { + callback = options; + options = null; + } + if (!callback) { + throw new Error('callback cannot be null.'); + } + let tags = (options && options.tags !== undefined) ? 
options.tags : undefined; + // Validate + try { + if (resourceGroupName === null || resourceGroupName === undefined || typeof resourceGroupName.valueOf() !== 'string') { + throw new Error('resourceGroupName cannot be null or undefined and it must be of type string.'); + } + if (resourceGroupName !== null && resourceGroupName !== undefined) { + if (resourceGroupName.match(/^[-\w\._]+$/) === null) + { + throw new Error('"resourceGroupName" should satisfy the constraint - "Pattern": /^[-\w\._]+$/'); + } + } + if (workspaceName === null || workspaceName === undefined || typeof workspaceName.valueOf() !== 'string') { + throw new Error('workspaceName cannot be null or undefined and it must be of type string.'); + } + if (workspaceName !== null && workspaceName !== undefined) { + if (workspaceName.length > 64) + { + throw new Error('"workspaceName" should satisfy the constraint - "MaxLength": 64'); + } + if (workspaceName.length < 1) + { + throw new Error('"workspaceName" should satisfy the constraint - "MinLength": 1'); + } + if (workspaceName.match(/^[-\w_]+$/) === null) + { + throw new Error('"workspaceName" should satisfy the constraint - "Pattern": /^[-\w_]+$/'); + } + } + if (this.client.apiVersion === null || this.client.apiVersion === undefined || typeof this.client.apiVersion.valueOf() !== 'string') { + throw new Error('this.client.apiVersion cannot be null or undefined and it must be of type string.'); + } + if (this.client.subscriptionId === null || this.client.subscriptionId === undefined || typeof this.client.subscriptionId.valueOf() !== 'string') { + throw new Error('this.client.subscriptionId cannot be null or undefined and it must be of type string.'); + } + if (tags && typeof tags === 'object') { + for(let valueElement in tags) { + if (tags[valueElement] !== null && tags[valueElement] !== undefined && typeof tags[valueElement].valueOf() !== 'string') { + throw new Error('tags[valueElement] must be of type string.'); + } + } + } + if 
(this.client.acceptLanguage !== null && this.client.acceptLanguage !== undefined && typeof this.client.acceptLanguage.valueOf() !== 'string') { + throw new Error('this.client.acceptLanguage must be of type string.'); + } + } catch (error) { + return callback(error); + } + let parameters; + if (tags !== null && tags !== undefined) { + parameters = new client.models['WorkspaceUpdateParameters'](); + parameters.tags = tags; + } + + // Construct URL + let baseUrl = this.client.baseUri; + let requestUrl = baseUrl + (baseUrl.endsWith('/') ? '' : '/') + 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.BatchAI/workspaces/{workspaceName}'; + requestUrl = requestUrl.replace('{resourceGroupName}', encodeURIComponent(resourceGroupName)); + requestUrl = requestUrl.replace('{workspaceName}', encodeURIComponent(workspaceName)); + requestUrl = requestUrl.replace('{subscriptionId}', encodeURIComponent(this.client.subscriptionId)); + let queryParameters = []; + queryParameters.push('api-version=' + encodeURIComponent(this.client.apiVersion)); + if (queryParameters.length > 0) { + requestUrl += '?' 
+ queryParameters.join('&'); + } + + // Create HTTP transport objects + let httpRequest = new WebResource(); + httpRequest.method = 'PATCH'; + httpRequest.url = requestUrl; + httpRequest.headers = {}; + // Set Headers + httpRequest.headers['Content-Type'] = 'application/json; charset=utf-8'; + if (this.client.generateClientRequestId) { + httpRequest.headers['x-ms-client-request-id'] = msRestAzure.generateUuid(); + } + if (this.client.acceptLanguage !== undefined && this.client.acceptLanguage !== null) { + httpRequest.headers['accept-language'] = this.client.acceptLanguage; + } + if(options) { + for(let headerName in options['customHeaders']) { + if (options['customHeaders'].hasOwnProperty(headerName)) { + httpRequest.headers[headerName] = options['customHeaders'][headerName]; + } + } + } + // Serialize Request + let requestContent = null; + let requestModel = null; + try { + if (parameters !== null && parameters !== undefined) { + let requestModelMapper = new client.models['WorkspaceUpdateParameters']().mapper(); + requestModel = client.serialize(requestModelMapper, parameters, 'parameters'); + requestContent = JSON.stringify(requestModel); + } + } catch (error) { + let serializationError = new Error(`Error "${error.message}" occurred in serializing the ` + + `payload - ${JSON.stringify(parameters, null, 2)}.`); + return callback(serializationError); + } + httpRequest.body = requestContent; + // Send Request + return client.pipeline(httpRequest, (err, response, responseBody) => { + if (err) { + return callback(err); + } + let statusCode = response.statusCode; + if (statusCode !== 200) { + let error = new Error(responseBody); + error.statusCode = response.statusCode; + error.request = msRest.stripRequest(httpRequest); + error.response = msRest.stripResponse(response); + if (responseBody === '') responseBody = null; + let parsedErrorResponse; + try { + parsedErrorResponse = JSON.parse(responseBody); + if (parsedErrorResponse) { + if (parsedErrorResponse.error) 
parsedErrorResponse = parsedErrorResponse.error; + if (parsedErrorResponse.code) error.code = parsedErrorResponse.code; + if (parsedErrorResponse.message) error.message = parsedErrorResponse.message; + } + if (parsedErrorResponse !== null && parsedErrorResponse !== undefined) { + let resultMapper = new client.models['CloudError']().mapper(); + error.body = client.deserialize(resultMapper, parsedErrorResponse, 'error.body'); + } + } catch (defaultError) { + error.message = `Error "${defaultError.message}" occurred in deserializing the responseBody ` + + `- "${responseBody}" for the default response.`; + return callback(error); + } + return callback(error); + } + // Create Result + let result = null; + if (responseBody === '') responseBody = null; + // Deserialize Response + if (statusCode === 200) { + let parsedResponse = null; + try { + parsedResponse = JSON.parse(responseBody); + result = JSON.parse(responseBody); + if (parsedResponse !== null && parsedResponse !== undefined) { + let resultMapper = new client.models['Workspace']().mapper(); + result = client.deserialize(resultMapper, parsedResponse, 'result'); + } + } catch (error) { + let deserializationError = new Error(`Error ${error} occurred in deserializing the responseBody - ${responseBody}`); + deserializationError.request = msRest.stripRequest(httpRequest); + deserializationError.response = msRest.stripResponse(response); + return callback(deserializationError); + } + } + + return callback(null, result, httpRequest, response); + }); +} + /** * Deletes a Workspace. @@ -674,12 +872,12 @@ function _get(resourceGroupName, workspaceName, options, callback) { * * @param {object} parameters Workspace creation parameters. * - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. 
* + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1270,6 +1468,7 @@ class Workspaces { this._list = _list; this._listByResourceGroup = _listByResourceGroup; this._create = _create; + this._update = _update; this._deleteMethod = _deleteMethod; this._get = _get; this._beginCreate = _beginCreate; @@ -1480,12 +1679,12 @@ class Workspaces { * * @param {object} parameters Workspace creation parameters. * - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. * + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1523,12 +1722,12 @@ class Workspaces { * * @param {object} parameters Workspace creation parameters. * - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. * + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1576,6 +1775,104 @@ class Workspaces { } } + /** + * Updates properties of a Workspace. + * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. 
Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {object} [options] Optional Parameters. + * + * @param {object} [options.tags] Tags. The user specified tags associated with + * the Workspace. + * + * @param {object} [options.customHeaders] Headers that will be added to the + * request + * + * @returns {Promise} A promise is returned + * + * @resolve {HttpOperationResponse} - The deserialized result object. + * + * @reject {Error} - The error object. + */ + updateWithHttpOperationResponse(resourceGroupName, workspaceName, options) { + let client = this.client; + let self = this; + return new Promise((resolve, reject) => { + self._update(resourceGroupName, workspaceName, options, (err, result, request, response) => { + let httpOperationResponse = new msRest.HttpOperationResponse(request, response); + httpOperationResponse.body = result; + if (err) { reject(err); } + else { resolve(httpOperationResponse); } + return; + }); + }); + } + + /** + * Updates properties of a Workspace. + * + * @param {string} resourceGroupName Name of the resource group to which the + * resource belongs. + * + * @param {string} workspaceName The name of the workspace. Workspace names can + * only contain a combination of alphanumeric characters along with dash (-) + * and underscore (_). The name must be from 1 through 64 characters long. + * + * @param {object} [options] Optional Parameters. + * + * @param {object} [options.tags] Tags. The user specified tags associated with + * the Workspace. + * + * @param {object} [options.customHeaders] Headers that will be added to the + * request + * + * @param {function} [optionalCallback] - The optional callback. + * + * @returns {function|Promise} If a callback was passed as the last parameter + * then it returns the callback else returns a Promise. 
+ * + * {Promise} A promise is returned + * + * @resolve {Workspace} - The deserialized result object. + * + * @reject {Error} - The error object. + * + * {function} optionalCallback(err, result, request, response) + * + * {Error} err - The Error object if an error occurred, null otherwise. + * + * {object} [result] - The deserialized result object if an error did not occur. + * See {@link Workspace} for more information. + * + * {object} [request] - The HTTP Request object if an error did not occur. + * + * {stream} [response] - The HTTP Response stream if an error did not occur. + */ + update(resourceGroupName, workspaceName, options, optionalCallback) { + let client = this.client; + let self = this; + if (!optionalCallback && typeof options === 'function') { + optionalCallback = options; + options = null; + } + if (!optionalCallback) { + return new Promise((resolve, reject) => { + self._update(resourceGroupName, workspaceName, options, (err, result, request, response) => { + if (err) { reject(err); } + else { resolve(result); } + return; + }); + }); + } else { + return self._update(resourceGroupName, workspaceName, options, optionalCallback); + } + } + /** * Deletes a Workspace. * @@ -1771,12 +2068,12 @@ class Workspaces { * * @param {object} parameters Workspace creation parameters. * - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. * + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the @@ -1814,12 +2111,12 @@ class Workspaces { * * @param {object} parameters Workspace creation parameters. 
* - * @param {string} parameters.location The region in which to create the - * Workspace. - * - * @param {object} [parameters.tags] The user specified tags associated with + * @param {string} parameters.location Location. The region in which to create * the Workspace. * + * @param {object} [parameters.tags] Tags. The user specified tags associated + * with the Workspace. + * * @param {object} [options] Optional Parameters. * * @param {object} [options.customHeaders] Headers that will be added to the